19 #if defined(__BRANCHED_PATH__) && defined(__SUBSURFACE__)

/* Initialize per-ray resume state for the branched-path subsurface
 * indirect-light loop.
 *
 * NOTE(review): this chunk is extraction residue — the original file's line
 * numbers are fused into the code text and many body lines are missing, so
 * the visible statements are NOT contiguous.  Do not edit logic here without
 * the original file. */
21 ccl_device_inline void kernel_split_branched_path_subsurface_indirect_light_init(KernelGlobals *
kg,
/* Shared setup for all branched-path indirect loops (presumably saves the
 * current path state so it can be restored when the loop ends — TODO confirm
 * against kernel_split_branched_path_indirect_loop_init). */
24 kernel_split_branched_path_indirect_loop_init(
kg, ray_index);
/* Reset the subsurface resume counters so iteration starts from the first
 * BSSRDF closure, first sample, and first local hit.  These counters are
 * read back by the _iter function below to resume a partially-finished loop. */
28 branched_state->ss_next_closure = 0;
29 branched_state->ss_next_sample = 0;
31 branched_state->num_hits = 0;
32 branched_state->next_hit = 0;
/* Resumable iteration over subsurface indirect-light work for one ray.
 *
 * Structure (as far as visible in this gapped extract): a triple loop over
 *   (1) BSSRDF closures of the shader  (resume point: ss_next_closure),
 *   (2) samples per closure            (resume point: ss_next_sample),
 *   (3) local subsurface hits          (resume point: next_hit),
 * with inner indirect-light bounces tracked by next_closure / next_sample.
 * When the inner indirect-light iteration yields (returns true), the current
 * loop indices are stored back into branched_state so a later invocation can
 * resume exactly where it left off.
 *
 * NOTE(review): many original lines are missing from this view (the embedded
 * numbering jumps); declarations of branched_state, sd, bssrdf_sd, hit_state,
 * ss_isect, emission_sd and the enclosing return statements are not visible.
 * Comments below only describe what the visible lines demonstrably do. */
KernelGlobals *
kg,
int ray_index)
/* Outer loop over shader closures, resuming at the saved closure index. */
46 for (
int i = branched_state->ss_next_closure; i < sd->num_closure; i++) {
55 float bssrdf_roughness =
bssrdf->roughness;
/* Only (re)compute per-closure sampling setup when not resuming into a
 * deeper loop level (all inner resume counters at zero). */
58 if (branched_state->ss_next_sample == 0 && branched_state->next_hit == 0 &&
59 branched_state->next_closure == 0 && branched_state->next_sample == 0) {
/* NOTE(review): the *3 factor presumably matches Cycles' branched-path
 * subsurface sample multiplier — confirm against the original source. */
63 int num_samples =
kernel_data.integrator.subsurface_samples * 3;
64 float num_samples_inv = 1.0f / num_samples;
/* Decorrelate RNG per closure by hashing the path RNG with the closure
 * index. */
65 uint bssrdf_rng_hash =
cmj_hash(branched_state->path_state.rng_hash, i);
/* Loop over subsurface samples for this closure, resuming at the saved
 * sample index. */
69 for (
int j = branched_state->ss_next_sample; j < num_samples; j++) {
/* Start each sample from a copy of the saved path state with the
 * per-closure RNG hash. */
71 *hit_state = branched_state->path_state;
72 hit_state->rng_hash = bssrdf_rng_hash;
76 float bssrdf_u, bssrdf_v;
78 kg, bssrdf_rng_hash, hit_state, j, num_samples,
PRNG_BSDF_U, &bssrdf_u, &bssrdf_v);
/* Only trace a fresh subsurface intersection when not resuming into the
 * hit/bounce loops below. */
81 if (branched_state->next_hit == 0 && branched_state->next_closure == 0 &&
82 branched_state->next_sample == 0) {
/* LCG state round-trips through a private copy because the intersect
 * call takes a non-address-space pointer. */
83 uint lcg_state = branched_state->lcg_state;
87 kg, &ss_isect_private, sd, hit_state, sc, &lcg_state, bssrdf_u, bssrdf_v,
true);
89 branched_state->lcg_state = lcg_state;
90 *ss_isect = ss_isect_private;
96 Ray volume_ray = branched_state->ray;
97 bool need_update_volume_stack =
kernel_data.integrator.use_volumes &&
/* Loop over the local hits found by the multi-intersect, resuming at the
 * saved hit index. */
102 for (
int hit = branched_state->next_hit; hit < branched_state->num_hits; hit++) {
109 kg, &ss_isect_private, hit, bssrdf_sd, hit_state, bssrdf_type, bssrdf_roughness);
110 *ss_isect = ss_isect_private;
/* Rebuild the volume stack at the subsurface exit point: restore the
 * saved stack, then update it for the ray segment to this hit. */
113 if (need_update_volume_stack) {
119 hit_state->volume_stack[k] = branched_state->path_state.volume_stack[k];
122 kernel_volume_stack_update_for_subsurface(
123 kg, emission_sd, &volume_ray, hit_state->volume_stack);
/* Direct lighting at the exit point — only once per hit, i.e. when not
 * resuming into the indirect bounce loop. */
128 if (branched_state->next_closure == 0 && branched_state->next_sample == 0) {
133 kernel_branched_path_surface_connect_light(
kg,
137 branched_state->throughput,
/* Indirect bounces from the exit point.  A true return means the inner
 * iteration yielded; save all loop indices so the next call resumes here,
 * then (presumably) return to the caller — the return itself is among the
 * missing lines. */
146 if (kernel_split_branched_path_surface_indirect_light_iter(
147 kg, ray_index, num_samples_inv, bssrdf_sd,
false,
false)) {
148 branched_state->ss_next_closure = i;
149 branched_state->ss_next_sample = j;
150 branched_state->next_hit = hit;
/* Reset inner resume counters after each level completes so the next
 * outer iteration starts clean. */
155 branched_state->next_closure = 0;
158 branched_state->next_hit = 0;
161 branched_state->ss_next_sample = 0;
/* Mark the closure loop as fully consumed. */
164 branched_state->ss_next_closure = sd->num_closure;
/* If other rays still share our samples, wait before tearing the loop
 * state down. */
166 branched_state->waiting_on_shared_samples = (branched_state->shared_sample_count > 0);
167 if (branched_state->waiting_on_shared_samples) {
171 kernel_split_branched_path_indirect_loop_end(
kg, ray_index);
/* Split-kernel entry fragments for subsurface scattering.
 *
 * NOTE(review): this span is heavily gapped — queue setup, ray-state checks
 * and most control flow are among the missing lines; #ifdef nesting below
 * cannot be fully verified from this view. */
181 if (thread_index == 0) {
201 #ifdef __SUBSURFACE__
215 # ifdef __BRANCHED_PATH__
/* Scatter the ray under the surface; a true return means subsurface
 * bounces were generated and (in branched mode) the indirect-light loop
 * must be initialized and iterated. */
219 if (kernel_path_subsurface_scatter(
220 kg, sd, emission_sd,
L,
state, ray, throughput, ss_indirect)) {
223 # ifdef __BRANCHED_PATH__
226 kernel_split_branched_path_subsurface_indirect_light_init(
kg, ray_index);
228 if (kernel_split_branched_path_subsurface_indirect_light_iter(
kg, ray_index)) {
236 # ifdef __BRANCHED_PATH__
/* Second iter call — presumably the resume path for rays re-entering the
 * loop on a later kernel invocation (TODO confirm against original). */
255 if (kernel_split_branched_path_subsurface_indirect_light_iter(
kg, ray_index)) {
ccl_device_inline float3 ray_offset(float3 P, float3 Ng)
ccl_device_inline void path_radiance_sum_indirect(PathRadiance *L)
ccl_device_inline void path_radiance_reset_indirect(PathRadiance *L)
#define ccl_global_size(d)
#define ccl_device_inline
#define ccl_device_noinline
#define CCL_NAMESPACE_END
ccl_device_inline uint cmj_hash(uint i, uint p)
ccl_device_inline void path_state_branch(ccl_addr_space PathState *state, int branch, int num_branches)
ccl_device int get_ray_index(KernelGlobals *kg, int thread_index, int queue_number, ccl_global int *queues, int queuesize, int empty_queue)
ccl_device_inline uint lcg_state_init_addrspace(ccl_addr_space PathState *state, uint scramble)
ccl_device_inline void path_branched_rng_2D(KernelGlobals *kg, uint rng_hash, const ccl_addr_space PathState *state, int branch, int num_branches, int dimension, float *fx, float *fy)
CCL_NAMESPACE_BEGIN ccl_device_inline void kernel_split_path_end(KernelGlobals *kg, int ray_index)
#define kernel_split_params
#define kernel_split_sd(sd, ray_index)
#define kernel_split_state
__kernel void ccl_constant KernelData ccl_global void ccl_global char * ray_state
ccl_device_inline int subsurface_scatter_multi_intersect(KernelGlobals *kg, LocalIntersection *ss_isect, ShaderData *sd, ccl_addr_space PathState *state, const ShaderClosure *sc, uint *lcg_state, float bssrdf_u, float bssrdf_v, bool all)
ccl_device_noinline void subsurface_scatter_multi_setup(KernelGlobals *kg, LocalIntersection *ss_isect, int hit, ShaderData *sd, ccl_addr_space PathState *state, ClosureType type, float roughness)
CCL_NAMESPACE_BEGIN ccl_device void kernel_subsurface_scatter(KernelGlobals *kg)
#define IS_FLAG(ray_state, ray_index, flag)
#define AS_SHADER_DATA(shader_data_tiny_storage)
#define IS_STATE(ray_state, ray_index, state)
@ PATH_RAY_SHADOW_CATCHER
@ SD_OBJECT_INTERSECTS_VOLUME
#define ADD_RAY_FLAG(ray_state, ray_index, flag)
#define VOLUME_STACK_SIZE
#define ASSIGN_RAY_STATE(ray_state, ray_index, state)
@ RAY_BRANCHED_SUBSURFACE_INDIRECT
@ RAY_SUBSURFACE_INDIRECT_NEXT_ITER
@ QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS
@ QUEUE_SUBSURFACE_INDIRECT_ITER
@ QUEUE_ACTIVE_AND_REGENERATED_RAYS
closure color bssrdf(string method, normal N, vector radius, color albedo) BUILTIN
#define CLOSURE_IS_BSSRDF(type)
__forceinline bool all(const avxb &b)
ccl_device_inline float2 normalize_len(const float2 &a, float *t)