Blender  V2.93
kernel_holdout_emission_blurring_pathtermination_ao.h
Go to the documentation of this file.
1 /*
2  * Copyright 2011-2015 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
18 
19 /* This kernel takes care of the logic to process "material of type holdout",
20  * indirect primitive emission, bsdf blurring, probabilistic path termination
21  * and AO.
22  *
 23  * This kernel determines the rays for which a
24  * associated with AO should be executed. Those rays for which a
25  * shadow_blocked() function for AO must be executed are marked with flag
26  * RAY_SHADOW_RAY_CAST_ao and enqueued into the queue
27  * QUEUE_SHADOW_RAY_CAST_AO_RAYS
28  *
 29  * The ray state of rays that are terminated in this kernel is changed to RAY_UPDATE_BUFFER
30  *
31  * Note on Queues:
32  * This kernel fetches rays from the queue QUEUE_ACTIVE_AND_REGENERATED_RAYS
33  * and processes only the rays of state RAY_ACTIVE.
34  * There are different points in this kernel where a ray may terminate and
35  * reach RAY_UPDATE_BUFFER state. These rays are enqueued into
36  * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. These rays will still be present
37  * in QUEUE_ACTIVE_AND_REGENERATED_RAYS queue, but since their ray-state has
38  * been changed to RAY_UPDATE_BUFFER, there is no problem.
39  *
40  * State of queues when this kernel is called:
41  * At entry,
42  * - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and
43  * RAY_REGENERATED rays
44  * - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
45  * RAY_TO_REGENERATE rays.
46  * - QUEUE_SHADOW_RAY_CAST_AO_RAYS will be empty.
47  * At exit,
48  * - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
49  * RAY_REGENERATED and RAY_UPDATE_BUFFER rays.
50  * - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
51  * RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
52  * - QUEUE_SHADOW_RAY_CAST_AO_RAYS will be filled with rays marked with
53  * flag RAY_SHADOW_RAY_CAST_AO
54  */
55 
57  KernelGlobals *kg, ccl_local_param BackgroundAOLocals *locals)
58 {
59  if (ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
60  locals->queue_atomics_bg = 0;
61  locals->queue_atomics_ao = 0;
62  }
64 
65 #ifdef __AO__
66  char enqueue_flag = 0;
67 #endif
68  int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
69  ray_index = get_ray_index(kg,
70  ray_index,
72  kernel_split_state.queue_data,
73  kernel_split_params.queue_size,
74  0);
75 
76  if (ray_index != QUEUE_EMPTY_SLOT) {
77  ccl_global PathState *state = 0x0;
78  float3 throughput;
79 
80  ccl_global char *ray_state = kernel_split_state.ray_state;
81  ShaderData *sd = kernel_split_sd(sd, ray_index);
82 
83  if (IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
84  uint buffer_offset = kernel_split_state.buffer_offset[ray_index];
85  ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;
86 
87  ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
88  ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);
89  PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
90 
91  throughput = kernel_split_state.throughput[ray_index];
92  state = &kernel_split_state.path_state[ray_index];
93 
94  if (!kernel_path_shader_apply(kg, sd, state, ray, throughput, emission_sd, L, buffer)) {
95  kernel_split_path_end(kg, ray_index);
96  }
97  }
98 
99  if (IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
100  /* Path termination. this is a strange place to put the termination, it's
101  * mainly due to the mixed in MIS that we use. gives too many unneeded
102  * shader evaluations, only need emission if we are going to terminate.
103  */
104  float probability = path_state_continuation_probability(kg, state, throughput);
105 
106  if (probability == 0.0f) {
107  kernel_split_path_end(kg, ray_index);
108  }
109  else if (probability < 1.0f) {
110  float terminate = path_state_rng_1D(kg, state, PRNG_TERMINATE);
111  if (terminate >= probability) {
112  kernel_split_path_end(kg, ray_index);
113  }
114  else {
115  kernel_split_state.throughput[ray_index] = throughput / probability;
116  }
117  }
118 
119 #ifdef __DENOISING_FEATURES__
120  if (IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
121  PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
122  kernel_update_denoising_features(kg, sd, state, L);
123  }
124 #endif
125  }
126 
127 #ifdef __AO__
128  if (IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
129  /* ambient occlusion */
130  if (kernel_data.integrator.use_ambient_occlusion) {
131  enqueue_flag = 1;
132  }
133  }
134 #endif /* __AO__ */
135  }
136 
137 #ifdef __AO__
138  /* Enqueue to-shadow-ray-cast rays. */
139  enqueue_ray_index_local(ray_index,
141  enqueue_flag,
142  kernel_split_params.queue_size,
143  &locals->queue_atomics_ao,
144  kernel_split_state.queue_data,
145  kernel_split_params.queue_index);
146 #endif
147 }
148 
unsigned int uint
Definition: BLI_sys_types.h:83
#define kernel_data
#define ccl_global_id(d)
#define ccl_global_size(d)
#define ccl_local_param
#define ccl_device
ccl_device_inline uint ccl_local_id(uint d)
#define ccl_global
#define CCL_NAMESPACE_END
CCL_NAMESPACE_BEGIN ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(KernelGlobals *kg, ccl_local_param BackgroundAOLocals *locals)
ccl_device_forceinline bool kernel_path_shader_apply(KernelGlobals *kg, ShaderData *sd, ccl_addr_space PathState *state, ccl_addr_space Ray *ray, float3 throughput, ShaderData *emission_sd, PathRadiance *L, ccl_global float *buffer)
Definition: kernel_path.h:254
ccl_device_inline float path_state_continuation_probability(KernelGlobals *kg, ccl_addr_space PathState *state, const float3 throughput)
ccl_device int get_ray_index(KernelGlobals *kg, int thread_index, int queue_number, ccl_global int *queues, int queuesize, int empty_queue)
Definition: kernel_queues.h:53
ccl_device void enqueue_ray_index_local(int ray_index, int queue_number, char enqueue_flag, int queuesize, ccl_local_param unsigned int *local_queue_atomics, ccl_global int *Queue_data, ccl_global int *Queue_index)
Definition: kernel_queues.h:71
ccl_device_inline float path_state_rng_1D(KernelGlobals *kg, const ccl_addr_space PathState *state, int dimension)
CCL_NAMESPACE_BEGIN ccl_device_inline void kernel_split_path_end(KernelGlobals *kg, int ray_index)
#define kernel_split_params
#define kernel_split_sd(sd, ray_index)
#define kernel_split_state
__kernel void ccl_constant KernelData ccl_global void ccl_global char ccl_global int ccl_global char ccl_global unsigned int ccl_global float * buffer
__kernel void ccl_constant KernelData ccl_global void ccl_global char * ray_state
#define AS_SHADER_DATA(shader_data_tiny_storage)
#define IS_STATE(ray_state, ray_index, state)
@ PRNG_TERMINATE
Definition: kernel_types.h:245
ShaderData
#define QUEUE_EMPTY_SLOT
@ RAY_ACTIVE
@ QUEUE_SHADOW_RAY_CAST_AO_RAYS
@ QUEUE_ACTIVE_AND_REGENERATED_RAYS
static ulong state[N]
#define L
#define CCL_LOCAL_MEM_FENCE
Definition: util_atomic.h:32
#define ccl_barrier(flags)
Definition: util_atomic.h:33