Blender V4.3
eevee_film.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
14
15#include "BLI_hash.h"
16#include "BLI_rect.h"
17#include "BLI_set.hh"
18
19#include "BKE_compositor.hh"
20
21#include "GPU_debug.hh"
22#include "GPU_framebuffer.hh"
23#include "GPU_texture.hh"
24
25#include "DRW_render.hh"
26#include "RE_pipeline.h"
27
28#include "eevee_film.hh"
29#include "eevee_instance.hh"
30
31namespace blender::eevee {
32
33/* -------------------------------------------------------------------- */
36
void Film::init_aovs(const Set<std::string> &passes_used_by_viewport_compositor)
{
  /* Gather the AOVs (Arbitrary Output Variables) that must be accumulated for this
   * redraw/render and fill `aovs_info` (value/color counts, name hashes, display slot). */
  Vector<ViewLayerAOV *> aovs;

  /* Reset state: no AOV selected for display until proven otherwise. */
  aovs_info.display_id = -1;
  aovs_info.display_is_value = false;
  aovs_info.value_len = aovs_info.color_len = 0;

  if (inst_.is_viewport()) {
    /* Viewport case. */
    if (inst_.v3d->shading.render_pass == EEVEE_RENDER_PASS_AOV) {
      /* AOV display, request only a single AOV. */
      /* NOTE(review): one original line was lost during extraction here. Judging by the
       * arguments on the next line, it presumably declared `aov` via a `BLI_findstring()`
       * lookup on the view-layer AOV list — confirm against the original file. */
          &inst_.view_layer->aovs, inst_.v3d->shading.aov_name, offsetof(ViewLayerAOV, name));

      /* AOV found in view layer. */
      if (aov) {
        aovs.append(aov);
        aovs_info.display_id = 0;
        aovs_info.display_is_value = (aov->type == AOV_TYPE_VALUE);
      }
    }

    /* NOTE(review): one original line was lost during extraction here. The brace balance
     * implies it opened a conditional block, plausibly gating on the viewport compositor
     * being enabled — confirm against the original file. */
      LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
        /* Already added as a display pass. No need to add again. */
        if (!aovs.is_empty() && aovs.last() == aov) {
          continue;
        }

        /* Only accumulate AOVs that are actually consumed downstream. */
        if (passes_used_by_viewport_compositor.contains(aov->name)) {
          aovs.append(aov);
        }
      }
    }
  }
  else {
    /* Render case: accumulate every AOV of the view layer. */
    LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
      aovs.append(aov);
    }
  }

  if (aovs.size() > AOV_MAX) {
    inst_.info_append_i18n("Error: Too many AOVs");
    return;
  }

  /* Pack value and color AOVs into their respective arrays, keyed by a name hash. */
  for (ViewLayerAOV *aov : aovs) {
    bool is_value = (aov->type == AOV_TYPE_VALUE);
    int &index = is_value ? aovs_info.value_len : aovs_info.color_len;
    uint &hash = is_value ? aovs_info.hash_value[index].x : aovs_info.hash_color[index].x;
    /* NOTE(review): one original line was lost during extraction here — presumably
     * `hash = BLI_hash_string(aov->name);`, given the reference taken above and the
     * matching hash comparison in `get_aov_texture()`; confirm. */
    index++;
  }

  if (!aovs.is_empty()) {
    enabled_categories_ |= PASS_CATEGORY_AOV;
  }
}
97
/* NOTE(review): the signature line was lost during extraction. From the body this is the
 * AOV read-back entry point — it takes the `aov` used below and returns `float *`
 * (CPU-side pixel data owned by the caller); confirm against the original file. */
{
  /* Resolve the accumulation layer view for this AOV; null when the AOV is unknown. */
  GPUTexture *pass_tx = this->get_aov_texture(aov);

  if (pass_tx == nullptr) {
    return nullptr;
  }

  /* NOTE(review): one original line was lost during extraction here; content unknown
   * (possibly a comment or a GPU barrier before the read-back) — confirm. */

  /* Synchronous GPU -> CPU read of the whole layer at mip 0. */
  return (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);
}
110
/* NOTE(review): the signature line was lost during extraction. From the body this takes
 * the `aov` referenced below and returns `GPUTexture *` (a layer view into the
 * accumulation texture) — confirm against the original file. */
{
  /* Value AOVs and color AOVs live in two different accumulation arrays. */
  bool is_value = (aov->type == AOV_TYPE_VALUE);
  Texture &accum_tx = is_value ? value_accum_tx_ : color_accum_tx_;

  Span<uint4> aovs_hash(is_value ? aovs_info.hash_value : aovs_info.hash_color,
                        is_value ? aovs_info.value_len : aovs_info.color_len);
  /* Find AOV index. */
  /* NOTE(review): one original line was lost during extraction here — presumably
   * `uint hash = BLI_hash_string(aov->name);`, since `hash` is compared below but not
   * declared in the extracted copy; confirm. */
  int aov_index = -1;
  int i = 0;
  /* Linear scan: the AOV count is bounded by AOV_MAX, so this is cheap. */
  for (uint4 candidate_hash : aovs_hash) {
    if (candidate_hash.x == hash) {
      aov_index = i;
      break;
    }
    i++;
  }

  if (aov_index == -1) {
    return nullptr;
  }

  accum_tx.ensure_layer_views();

  /* AOV layers are stored after the regular passes inside the accumulation array. */
  int index = aov_index + (is_value ? data_.aov_value_id : data_.aov_color_id);
  return accum_tx.layer_view(index);
}
139
141
142/* -------------------------------------------------------------------- */
145
146void Film::sync_mist()
147{
148 const CameraData &cam = inst_.camera.data_get();
149 const ::World *world = inst_.scene->world;
150 float mist_start = world ? world->miststa : cam.clip_near;
151 float mist_distance = world ? world->mistdist : fabsf(cam.clip_far - cam.clip_near);
152 int mist_type = world ? world->mistype : int(WO_MIST_LINEAR);
153
154 switch (mist_type) {
156 data_.mist_exponent = 2.0f;
157 break;
158 case WO_MIST_LINEAR:
159 data_.mist_exponent = 1.0f;
160 break;
162 data_.mist_exponent = 0.5f;
163 break;
164 }
165
166 data_.mist_scale = 1.0 / mist_distance;
167 data_.mist_bias = -mist_start / mist_distance;
168}
169
171
172/* -------------------------------------------------------------------- */
175
176inline bool operator==(const FilmData &a, const FilmData &b)
177{
178 return (a.extent == b.extent) && (a.offset == b.offset) &&
179 (a.render_extent == b.render_extent) && (a.overscan == b.overscan) &&
180 (a.filter_radius == b.filter_radius) && (a.scaling_factor == b.scaling_factor) &&
181 (a.background_opacity == b.background_opacity);
182}
183
184inline bool operator!=(const FilmData &a, const FilmData &b)
185{
186 return !(a == b);
187}
188
190
191/* -------------------------------------------------------------------- */
194
/* NOTE(review): the signature line was lost during extraction. From the callers this is
 * the file-local helper that maps a `ViewLayer`'s legacy pass flags to the
 * `eViewLayerEEVEEPassType` bitmask returned below; confirm against the original file. */
{
  /* NOTE(review): one original line was lost during extraction here — presumably the
   * declaration/initialization of `result`, which is used by the macro below but not
   * declared in the extracted copy; confirm. */

  /* We enforce the use of combined pass to be compliant with Cycles and EEVEE-Legacy (#122188). */
  /* NOTE(review): one original line was lost during extraction here (likely the statement
   * enforcing the combined pass described by the comment above); confirm. */

/* Translate one legacy `SCE_PASS_*` flag into the matching EEVEE pass bit. */
#define ENABLE_FROM_LEGACY(name_legacy, name_eevee) \
  SET_FLAG_FROM_TEST(result, \
                     (view_layer->passflag & SCE_PASS_##name_legacy) != 0, \
                     EEVEE_RENDER_PASS_##name_eevee);

  /* NOTE(review): a line was lost before MIST and another between MIST and POSITION
   * (likely further ENABLE_FROM_LEGACY entries, e.g. Z and NORMAL); confirm. */
  ENABLE_FROM_LEGACY(MIST, MIST)
  ENABLE_FROM_LEGACY(POSITION, POSITION)
  ENABLE_FROM_LEGACY(SHADOW, SHADOW)
  ENABLE_FROM_LEGACY(AO, AO)
  ENABLE_FROM_LEGACY(EMIT, EMIT)
  ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
  ENABLE_FROM_LEGACY(DIFFUSE_COLOR, DIFFUSE_COLOR)
  ENABLE_FROM_LEGACY(GLOSSY_COLOR, SPECULAR_COLOR)
  ENABLE_FROM_LEGACY(DIFFUSE_DIRECT, DIFFUSE_LIGHT)
  ENABLE_FROM_LEGACY(GLOSSY_DIRECT, SPECULAR_LIGHT)
  ENABLE_FROM_LEGACY(ENVIRONMENT, ENVIRONMENT)
  ENABLE_FROM_LEGACY(VECTOR, VECTOR)

#undef ENABLE_FROM_LEGACY

  /* NOTE(review): several original lines were lost during extraction here — plausibly the
   * cryptomatte pass handling (the `VIEW_LAYER_CRYPTOMATTE_*` flags are referenced by this
   * translation unit); confirm against the original file. */

  return result;
}
236
/* Get all pass types used by the viewport compositor from the set of all needed passes. */
/* NOTE(review): the first signature line was lost during extraction — from the call site
 * in `Film::init()` this is a file-local function returning `eViewLayerEEVEEPassType`
 * whose parameter list continues on the next line; confirm against the original file. */
    const Set<std::string> &viewport_compositor_needed_passes, const ViewLayer *view_layer)
{
  const eViewLayerEEVEEPassType scene_enabled_passes = enabled_passes(view_layer);

  /* Go over all possible pass types, check if their possible pass names exist in the viewport
   * compositor needed passes, and if true, mark them as needed. */
  eViewLayerEEVEEPassType viewport_compositor_enabled_passes = eViewLayerEEVEEPassType(0);
  for (const int i : IndexRange(EEVEE_RENDER_PASS_MAX_BIT + 1)) {
    /* Mask by the scene enabled passes, because some pass types like EEVEE_RENDER_PASS_UNUSED_8
     * have no corresponding pass names, so they will assert later. */
    eViewLayerEEVEEPassType pass_type = eViewLayerEEVEEPassType(scene_enabled_passes & (1 << i));
    if (pass_type == 0) {
      continue;
    }

    /* A single pass type can map to several render-pass names (e.g. cryptomatte layers). */
    for (const std::string &pass_name : Film::pass_to_render_pass_names(pass_type, view_layer)) {
      if (viewport_compositor_needed_passes.contains(pass_name)) {
        viewport_compositor_enabled_passes |= pass_type;
      }
    }
  }

  return viewport_compositor_enabled_passes;
}
263
void Film::init(const int2 &extent, const rcti *output_rect)
{
  /* Per-(re)draw film setup: decide which passes/categories are enabled, compute
   * extents/offsets, assign pass slot indices, and (re)allocate accumulation textures. */
  using namespace math;

  Sampling &sampling = inst_.sampling;
  Scene &scene = *inst_.scene;

  /* Compute the passes needed by the viewport compositor. */
  Set<std::string> passes_used_by_viewport_compositor;
  if (this->is_viewport_compositor_enabled()) {
    passes_used_by_viewport_compositor = bke::compositor::get_used_passes(scene, inst_.view_layer);
    viewport_compositor_enabled_passes_ = get_viewport_compositor_enabled_passes(
        passes_used_by_viewport_compositor, inst_.view_layer);
  }

  enabled_categories_ = PassCategory(0);
  init_aovs(passes_used_by_viewport_compositor);

  {
    /* Enable passes that need to be rendered. */
    if (inst_.is_viewport()) {
      /* Viewport Case. */
      enabled_passes_ = eViewLayerEEVEEPassType(inst_.v3d->shading.render_pass) |
                        viewport_compositor_enabled_passes_;

      if (inst_.overlays_enabled() || inst_.gpencil_engine_enabled()) {
        /* Overlays and Grease Pencil needs the depth for correct compositing.
         * Using the render pass ensure we store the center depth. */
        enabled_passes_ |= EEVEE_RENDER_PASS_Z;
      }
    }
    else {
      /* Render Case. */
      enabled_passes_ = enabled_passes(inst_.view_layer);
    }

    /* Filter obsolete passes. */
    /* NOTE(review): one original line was lost during extraction here — presumably the
     * statement masking out unused/obsolete pass bits described by the comment above;
     * confirm against the original file. */

    if (scene.r.mode & R_MBLUR) {
      /* Disable motion vector pass if motion blur is enabled. */
      enabled_passes_ &= ~EEVEE_RENDER_PASS_VECTOR;
    }
  }
  {
    /* Viewport preview can render at a lower resolution and upscale. */
    data_.scaling_factor = 1;
    if (inst_.is_viewport()) {
      data_.scaling_factor = BKE_render_preview_pixel_size(&inst_.scene->r);
    }
    /* Sharpen the LODs (1.5x) to avoid TAA filtering causing over-blur (see #122941). */
    data_.texture_lod_bias = 1.0f / (data_.scaling_factor * 1.5f);
  }
  {
    /* Compute film/render extents; an empty output rect means "use the full extent". */
    rcti fallback_rect;
    if (BLI_rcti_is_empty(output_rect)) {
      BLI_rcti_init(&fallback_rect, 0, extent[0], 0, extent[1]);
      output_rect = &fallback_rect;
    }

    display_extent = extent;

    data_.extent = int2(BLI_rcti_size_x(output_rect), BLI_rcti_size_y(output_rect));
    data_.offset = int2(output_rect->xmin, output_rect->ymin);
    data_.extent_inv = 1.0f / float2(data_.extent);
    /* Render at reduced resolution, then add camera overscan on each side. */
    data_.render_extent = divide_ceil(data_.extent, int2(data_.scaling_factor));
    data_.overscan = overscan_pixels_get(inst_.camera.overscan(), data_.render_extent);
    data_.render_extent += data_.overscan * 2;

    /* Disable filtering if sample count is 1. */
    data_.filter_radius = (sampling.sample_count() == 1) ? 0.0f :
                                                           clamp_f(scene.r.gauss, 0.0f, 100.0f);
    data_.cryptomatte_samples_len = inst_.view_layer->cryptomatte_levels;

    data_.background_opacity = (scene.r.alphamode == R_ALPHAPREMUL) ? 0.0f : 1.0f;
    if (inst_.is_viewport() && false /* TODO(fclem): StudioLight */) {
      data_.background_opacity = inst_.v3d->shading.studiolight_background;
    }

    /* NOTE(review): a run of original lines was lost during extraction here — presumably
     * the declarations of the `data_passes` / `color_passes_1` / `color_passes_2` /
     * `color_passes_3` bitmasks used just below, which are not declared in the extracted
     * copy; confirm against the original file. */

    data_.exposure_scale = pow2f(scene.view_settings.exposure);
    /* Enable an accumulation category only if at least one of its passes is enabled. */
    if (enabled_passes_ & data_passes) {
      enabled_categories_ |= PASS_CATEGORY_DATA;
    }
    if (enabled_passes_ & color_passes_1) {
      enabled_categories_ |= PASS_CATEGORY_COLOR_1;
    }
    if (enabled_passes_ & color_passes_2) {
      enabled_categories_ |= PASS_CATEGORY_COLOR_2;
    }
    if (enabled_passes_ & color_passes_3) {
      enabled_categories_ |= PASS_CATEGORY_COLOR_3;
    }
  }
  {
    /* Set pass offsets. */

    data_.display_id = aovs_info.display_id;
    data_.display_storage_type = aovs_info.display_is_value ? PASS_STORAGE_VALUE :
    /* NOTE(review): the continuation line of this ternary was lost during extraction —
     * presumably the color storage type alternative; confirm against the original file. */

    /* Combined is in a separate buffer. */
    data_.combined_id = (enabled_passes_ & EEVEE_RENDER_PASS_COMBINED) ? 0 : -1;
    /* Depth is in a separate buffer. */
    data_.depth_id = (enabled_passes_ & EEVEE_RENDER_PASS_Z) ? 0 : -1;

    data_.color_len = 0;
    data_.value_len = 0;

    /* Assign the next free slot in the value or color accumulation array to a pass
     * (or -1 when disabled), and remember the slot of the pass shown in the viewport. */
    auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) {
      ePassStorageType storage_type = pass_storage_type(pass_type);
      int index = (enabled_passes_ & pass_type) ?
                      (storage_type == PASS_STORAGE_VALUE ? data_.value_len : data_.color_len)++ :
                      -1;
      if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
        data_.display_id = index;
        data_.display_storage_type = storage_type;
      }
      return index;
    };

    data_.mist_id = pass_index_get(EEVEE_RENDER_PASS_MIST);
    data_.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL);
    data_.position_id = pass_index_get(EEVEE_RENDER_PASS_POSITION);
    data_.vector_id = pass_index_get(EEVEE_RENDER_PASS_VECTOR);
    data_.diffuse_light_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_LIGHT);
    data_.diffuse_color_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_COLOR);
    data_.specular_light_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_LIGHT);
    data_.specular_color_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_COLOR);
    data_.volume_light_id = pass_index_get(EEVEE_RENDER_PASS_VOLUME_LIGHT);
    data_.emission_id = pass_index_get(EEVEE_RENDER_PASS_EMIT);
    data_.environment_id = pass_index_get(EEVEE_RENDER_PASS_ENVIRONMENT);
    data_.shadow_id = pass_index_get(EEVEE_RENDER_PASS_SHADOW);
    data_.ambient_occlusion_id = pass_index_get(EEVEE_RENDER_PASS_AO);
    data_.transparent_id = pass_index_get(EEVEE_RENDER_PASS_TRANSPARENT);

    /* AOV layers are appended after the regular passes. */
    data_.aov_color_id = data_.color_len;
    data_.aov_value_id = data_.value_len;

    data_.aov_color_len = aovs_info.color_len;
    data_.aov_value_len = aovs_info.value_len;

    data_.color_len += data_.aov_color_len;
    data_.value_len += data_.aov_value_len;

    int cryptomatte_id = 0;
    /* Cryptomatte layers are packed two samples per texel (RG, BA), hence the
     * divide-by-two when advancing the running layer offset. */
    auto cryptomatte_index_get = [&](eViewLayerEEVEEPassType pass_type) {
      int index = -1;
      if (enabled_passes_ & pass_type) {
        index = cryptomatte_id;
        cryptomatte_id += divide_ceil_u(data_.cryptomatte_samples_len, 2u);

        if (inst_.is_viewport() && inst_.v3d->shading.render_pass == pass_type) {
          data_.display_id = index;
          data_.display_storage_type = PASS_STORAGE_CRYPTOMATTE;
        }
      }
      return index;
    };
    data_.cryptomatte_object_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT);
    data_.cryptomatte_asset_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET);
    data_.cryptomatte_material_id = cryptomatte_index_get(EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL);

    if ((enabled_passes_ &
    /* NOTE(review): the continuation of this condition was lost during extraction —
     * presumably the mask of the three `EEVEE_RENDER_PASS_CRYPTOMATTE_*` bits; confirm. */
    {
      enabled_categories_ |= PASS_CATEGORY_CRYPTOMATTE;
    }
  }
  {
    /* Panoramic or scaled rendering needs a per-pixel weight; otherwise 1x1 suffices. */
    int2 weight_extent = (inst_.camera.is_panoramic() || (data_.scaling_factor > 1)) ?
                             data_.extent :
                             int2(1);

    eGPUTextureFormat color_format = GPU_RGBA16F;
    eGPUTextureFormat float_format = GPU_R16F;
    eGPUTextureFormat weight_format = GPU_R32F;
    eGPUTextureFormat depth_format = GPU_R32F;
    eGPUTextureFormat cryptomatte_format = GPU_RGBA32F;

    /* `ensure_*` returns non-zero when the texture was (re)created, which invalidates
     * all accumulated history. */
    int reset = 0;
    reset += depth_tx_.ensure_2d(depth_format, data_.extent);
    reset += combined_tx_.current().ensure_2d(color_format, data_.extent);
    reset += combined_tx_.next().ensure_2d(color_format, data_.extent);
    /* Two layers, one for nearest sample weight and one for weight accumulation. */
    reset += weight_tx_.current().ensure_2d_array(weight_format, weight_extent, 2);
    reset += weight_tx_.next().ensure_2d_array(weight_format, weight_extent, 2);
    reset += color_accum_tx_.ensure_2d_array(color_format,
                                             (data_.color_len > 0) ? data_.extent : int2(1),
                                             (data_.color_len > 0) ? data_.color_len : 1);
    reset += value_accum_tx_.ensure_2d_array(float_format,
                                             (data_.value_len > 0) ? data_.extent : int2(1),
                                             (data_.value_len > 0) ? data_.value_len : 1);
    /* Divided by two as two cryptomatte samples fit in pixel (RG, BA). */
    int cryptomatte_array_len = cryptomatte_layer_len_get() *
                                divide_ceil_u(data_.cryptomatte_samples_len, 2u);
    reset += cryptomatte_tx_.ensure_2d_array(cryptomatte_format,
                                             (cryptomatte_array_len > 0) ? data_.extent : int2(1),
                                             (cryptomatte_array_len > 0) ? cryptomatte_array_len :
                                                                           1);

    if (reset > 0) {
      data_.use_history = 0;
      use_reprojection_ = false;

      /* Avoid NaN in uninitialized texture memory making history blending dangerous. */
      color_accum_tx_.clear(float4(0.0f));
      value_accum_tx_.clear(float4(0.0f));
      combined_tx_.current().clear(float4(0.0f));
      weight_tx_.current().clear(float4(0.0f));
      depth_tx_.clear(float4(0.0f));
      cryptomatte_tx_.clear(float4(0.0f));
    }
  }
}
492
/* NOTE(review): the signature line was lost during extraction. From the members and
 * passes touched below this is the end-of-sync setup that builds the accumulation,
 * copy and cryptomatte-post passes; confirm the exact name against the original file. */
{
  /* We use a fragment shader for viewport because we need to output the depth.
   *
   * Compute shader is also used to work around Metal/Intel iGPU issues concerning
   * read write support for array textures. In this case the copy_ps_ is used to
   * copy the right color/value to the framebuffer. */
  use_compute_ = !inst_.is_viewport() ||
  /* NOTE(review): the continuation of this condition was lost during extraction
   * (presumably the GPU-workaround check described by the comment above); confirm. */

  eShaderType shader = use_compute_ ? FILM_COMP : FILM_FRAG;

  /* TODO(fclem): Shader variation for panoramic & scaled resolution. */

  GPUShader *sh = inst_.shaders.static_shader_get(shader);
  accumulate_ps_.init();
  init_pass(accumulate_ps_, sh);
  /* NOTE(review): one original line was lost during extraction just above this comment;
   * content unknown — confirm against the original file. */
  /* Sync with rendering passes. */
  if (use_compute_) {
    /* One workgroup per FILM_GROUP_SIZE tile of the film extent. */
    accumulate_ps_.dispatch(int3(math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE)), 1));
  }
  else {
    /* Full-screen triangle. */
    accumulate_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
  }

  copy_ps_.init();
  if (use_compute_ && inst_.is_viewport()) {
    /* Compute path cannot write to the viewport framebuffer directly; copy afterwards. */
    init_pass(copy_ps_, inst_.shaders.static_shader_get(FILM_COPY));
    copy_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
  }

  const int cryptomatte_layer_count = cryptomatte_layer_len_get();
  const bool is_cryptomatte_pass_enabled = cryptomatte_layer_count > 0;
  const bool do_cryptomatte_sorting = !inst_.is_viewport() ||
  /* NOTE(review): the continuation of this condition was lost during extraction; confirm. */
  cryptomatte_post_ps_.init();
  if (is_cryptomatte_pass_enabled && do_cryptomatte_sorting) {
    cryptomatte_post_ps_.state_set(DRW_STATE_NO_DRAW);
    cryptomatte_post_ps_.shader_set(inst_.shaders.static_shader_get(FILM_CRYPTOMATTE_POST));
    cryptomatte_post_ps_.bind_image("cryptomatte_img", &cryptomatte_tx_);
    cryptomatte_post_ps_.bind_resources(inst_.uniform_data);
    cryptomatte_post_ps_.push_constant("cryptomatte_layer_len", cryptomatte_layer_count);
    cryptomatte_post_ps_.push_constant("cryptomatte_samples_per_layer",
                                       inst_.view_layer->cryptomatte_levels);
    int2 dispatch_size = math::divide_ceil(int2(cryptomatte_tx_.size()), int2(FILM_GROUP_SIZE));
    cryptomatte_post_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
    cryptomatte_post_ps_.dispatch(int3(UNPACK2(dispatch_size), 1));
  }
}
543
void Film::init_pass(PassSimple &pass, GPUShader *sh)
{
  /* Common setup shared by the accumulation and copy passes: specialization constants,
   * camera/velocity UBOs, render-buffer inputs and film accumulation outputs. */
  /* NOTE(review): one original line was lost during extraction here — presumably the
   * declaration of the sampler state `filter` used for `in_combined_tx` below, which is
   * otherwise undeclared in the extracted copy; confirm. */
  RenderBuffers &rbuffers = inst_.render_buffers;
  VelocityModule &velocity = inst_.velocity;

  /* Specialization constants: pointer overloads are re-read at submit time. */
  pass.specialize_constant(sh, "enabled_categories", uint(enabled_categories_));
  pass.specialize_constant(sh, "samples_len", &data_.samples_len);
  pass.specialize_constant(sh, "use_reprojection", &use_reprojection_);
  pass.specialize_constant(sh, "scaling_factor", data_.scaling_factor);
  pass.specialize_constant(sh, "combined_id", &data_.combined_id);
  pass.specialize_constant(sh, "display_id", &data_.display_id);
  pass.specialize_constant(sh, "normal_id", &data_.normal_id);
  /* NOTE(review): one original line was lost during extraction here (likely another
   * `specialize_constant` entry); confirm against the original file. */
  pass.shader_set(sh);
  /* For viewport, only previous motion is supported.
   * Still bind previous step to avoid undefined behavior. */
  eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;

  pass.bind_resources(inst_.uniform_data);
  pass.bind_ubo("camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
  pass.bind_ubo("camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
  pass.bind_ubo("camera_next", &(*velocity.camera_steps[step_next]));
  pass.bind_texture("depth_tx", &rbuffers.depth_tx);
  pass.bind_texture("combined_tx", &combined_final_tx_);
  pass.bind_texture("vector_tx", &rbuffers.vector_tx);
  pass.bind_texture("rp_color_tx", &rbuffers.rp_color_tx);
  pass.bind_texture("rp_value_tx", &rbuffers.rp_value_tx);
  pass.bind_texture("cryptomatte_tx", &rbuffers.cryptomatte_tx);
  /* NOTE(@fclem): 16 is the max number of sampled texture in many implementations.
   * If we need more, we need to pack more of the similar passes in the same textures as arrays or
   * use image binding instead. */
  pass.bind_image("in_weight_img", &weight_tx_.current());
  pass.bind_image("out_weight_img", &weight_tx_.next());
  pass.bind_texture("in_combined_tx", &combined_tx_.current(), filter);
  pass.bind_image("out_combined_img", &combined_tx_.next());
  pass.bind_image("depth_img", &depth_tx_);
  pass.bind_image("color_accum_img", &color_accum_tx_);
  pass.bind_image("value_accum_img", &value_accum_tx_);
  pass.bind_image("cryptomatte_img", &cryptomatte_tx_);
  /* NOTE(review): this binds `copy_ps_` rather than the `pass` parameter like every other
   * statement in this function — looks suspicious; confirm intent against the original. */
  copy_ps_.bind_resources(inst_.uniform_data);
}
586
/* NOTE(review): the signature line was lost during extraction; from the members touched
 * below this is the per-sync update (reprojection flag, AOV info push, mist params). */
{
  /* Reproject history only while the user is interacting (TAA-style viewport). */
  use_reprojection_ = inst_.sampling.interactive_mode();

  /* Just bypass the reprojection and reset the accumulation. */
  /* NOTE(review): `use_reprojection_` is already false inside this branch, so the first
   * assignment below is redundant as extracted — possibly a line was altered/lost. */
  if (!use_reprojection_ && inst_.sampling.is_reset()) {
    use_reprojection_ = false;
    data_.use_history = false;
  }

  /* Upload the AOV hashes/counts gathered in `init_aovs()` to the GPU. */
  aovs_info.push_update();

  sync_mist();
}
601
/* NOTE(review): the signature line was lost during extraction; from the body this
 * computes and returns the `float2` sub-pixel jitter for the current sample. */
{
  float2 jitter = inst_.sampling.rng_2d_get(SAMPLING_FILTER_U);

  if (!use_box_filter && data_.filter_radius < M_SQRT1_2 && !inst_.camera.is_panoramic()) {
    /* For filter size less than a pixel, change sampling strategy and use a uniform disk
     * distribution covering the filter shape. This avoids putting samples in areas without any
     * weights. */
    /* TODO(fclem): Importance sampling could be a better option here. */
    /* NOTE: We bias the disk to encompass most of the energy of the filter to avoid energy issues
     * with motion blur at low sample. */
    const float bias = 0.5f;
    jitter = Sampling::sample_disk(jitter) * bias * data_.filter_radius;
  }
  else {
    /* Jitter the size of a whole pixel. [-0.5..0.5] */
    jitter -= 0.5f;
  }

  if (data_.scaling_factor > 1) {
    /* In this case, the jitter sequence is the same for the number of film pixel a render pixel
     * covers. This allows to add a manual offset to the different film pixels to ensure they get
     * appropriate coverage instead of waiting that random sampling covers all the area. This
     * ensures a much faster convergence. */
    const int scale = data_.scaling_factor;
    const int render_pixel_per_final_pixel = square_i(scale);
    /* TODO(fclem): Random in Z-order curve. */
    /* Works great for the scaling factor we have. */
    int prime = (render_pixel_per_final_pixel / 2) - 1;
    /* For now just randomize in scan-lines using a prime number. */
    uint64_t index = (inst_.sampling.sample_index() * prime) % render_pixel_per_final_pixel;
    int2 pixel_co = int2(index % scale, index / scale);
    /* The jitter is applied on render target pixels. Make it proportional to film pixel. */
    jitter /= float(scale);
    /* Offset from the render pixel center to the center of film pixel. */
    jitter += ((float2(pixel_co) + 0.5f) / scale) - 0.5f;
  }
  return jitter;
}
641
/* NOTE(review): the signature line was lost during extraction; from the body this is the
 * accessor returning the effective `eViewLayerEEVEEPassType` bitmask to render. */
{
  if (inst_.is_viewport() && use_reprojection_) {
    /* Enable motion vector rendering but not the accumulation buffer. */
    return enabled_passes_ | EEVEE_RENDER_PASS_VECTOR;
  }
  return enabled_passes_;
}
650
/* NOTE(review): the signature line was lost during extraction; from the body this counts
 * how many cryptomatte layers (object/asset/material) are enabled and returns an int. */
{
  int result = 0;
  /* An id of -1 means the corresponding cryptomatte pass is disabled. */
  result += data_.cryptomatte_object_id == -1 ? 0 : 1;
  result += data_.cryptomatte_asset_id == -1 ? 0 : 1;
  result += data_.cryptomatte_material_id == -1 ? 0 : 1;
  return result;
}
659
/* NOTE(review): the signature line was lost during extraction; from the body this returns
 * the highest enabled cryptomatte layer ordinal (material=3, asset=2, object=1, none=0),
 * relying on the fixed assignment order in `Film::init()`. */
{
  if (data_.cryptomatte_material_id != -1) {
    return 3;
  }
  if (data_.cryptomatte_asset_id != -1) {
    return 2;
  }
  if (data_.cryptomatte_object_id != -1) {
    return 1;
  }
  return 0;
}
673
void Film::update_sample_table()
{
  /* Precompute, for the current jitter, the table of neighboring render-target texels
   * (and their filter weights) that contribute to each film pixel. */

  /* Offset in render target pixels. */
  /* NOTE(review): one original line was lost during extraction here — presumably the
   * assignment of `data_.subpixel_offset`, which is read below but never written in the
   * extracted copy; confirm against the original file. */

  int filter_radius_ceil = ceilf(data_.filter_radius);
  float filter_radius_sqr = square_f(data_.filter_radius);

  data_.samples_len = 0;
  if (data_.scaling_factor > 1) {
    /* For this case there might be no valid samples for some pixels.
     * Still visit all four neighbors to have the best weight available.
     * Note that weight is computed on the GPU as it is different for each sample. */
    /* TODO(fclem): Make it work for filters larger than then scaling_factor. */
    for (int y = 0; y <= 1; y++) {
      for (int x = 0; x <= 1; x++) {
        FilmSample &sample = data_.samples[data_.samples_len];
        sample.texel = int2(x, y);
        sample.weight = -1.0f; /* Computed on GPU. */
        data_.samples_len++;
      }
    }
    data_.samples_weight_total = -1.0f; /* Computed on GPU. */
  }
  else if (use_box_filter || data_.filter_radius < 0.01f) {
    /* Disable gather filtering. */
    data_.samples[0].texel = int2(0, 0);
    data_.samples[0].weight = 1.0f;
    data_.samples_weight_total = 1.0f;
    data_.samples_len = 1;
  }
  /* NOTE: Threshold determined by hand until we don't hit the assert below. */
  else if (data_.filter_radius < 2.20f) {
    /* Small filter Size. */
    int closest_index = 0;
    float closest_distance = FLT_MAX;
    data_.samples_weight_total = 0.0f;
    /* TODO(fclem): For optimization, could try Z-tile ordering. */
    for (int y = -filter_radius_ceil; y <= filter_radius_ceil; y++) {
      for (int x = -filter_radius_ceil; x <= filter_radius_ceil; x++) {
        float2 pixel_offset = float2(x, y) - data_.subpixel_offset;
        float distance_sqr = math::length_squared(pixel_offset);
        if (distance_sqr < filter_radius_sqr) {
          if (data_.samples_len >= FILM_PRECOMP_SAMPLE_MAX) {
            BLI_assert_msg(0, "Precomputed sample table is too small.");
            break;
          }
          FilmSample &sample = data_.samples[data_.samples_len];
          sample.texel = int2(x, y);
          sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
          data_.samples_weight_total += sample.weight;

          /* Track the sample nearest to the pixel center. */
          if (distance_sqr < closest_distance) {
            closest_distance = distance_sqr;
            closest_index = data_.samples_len;
          }
          data_.samples_len++;
        }
      }
    }
    /* Put the closest one in first position. */
    if (closest_index != 0) {
      std::swap(data_.samples[closest_index], data_.samples[0]);
    }
  }
  else {
    /* Large Filter Size. */
    MutableSpan<FilmSample> sample_table(data_.samples, FILM_PRECOMP_SAMPLE_MAX);
    /* To avoid hitting driver TDR and slowing rendering too much we use random sampling. */
    /* TODO(fclem): This case needs more work. We could distribute the samples better to avoid
     * loading the same pixel twice. */
    data_.samples_len = sample_table.size();
    data_.samples_weight_total = 0.0f;

    int i = 0;
    for (FilmSample &sample : sample_table) {
      /* TODO(fclem): Own RNG. */
      float2 random_2d = inst_.sampling.rng_2d_get(SAMPLING_SSS_U);
      /* This randomization makes sure we converge to the right result but also makes nearest
       * neighbor filtering not converging rapidly. */
      random_2d.x = (random_2d.x + i) / float(FILM_PRECOMP_SAMPLE_MAX);

      float2 pixel_offset = math::floor(Sampling::sample_spiral(random_2d) * data_.filter_radius);
      sample.texel = int2(pixel_offset);

      float distance_sqr = math::length_squared(pixel_offset - data_.subpixel_offset);
      sample.weight = film_filter_weight(data_.filter_radius, distance_sqr);
      data_.samples_weight_total += sample.weight;
      i++;
    }
  }
}
766
void Film::accumulate(View &view, GPUTexture *combined_final_tx)
{
  /* Accumulate the current sample's render result into the film buffers, and (in the
   * viewport) present the accumulated image to the default framebuffer. */
  if (inst_.is_viewport()) {
    /* NOTE(review): a few original lines were lost during extraction here — presumably
     * the declarations of `dtxl` / `dfbl` (default texture/framebuffer lists) used below
     * but undeclared in the extracted copy; confirm against the original file. */
    /* Clear when using render borders. */
    if (data_.extent != int2(GPU_texture_width(dtxl->color), GPU_texture_height(dtxl->color))) {
      float4 clear_color = {0.0f, 0.0f, 0.0f, 0.0f};
      GPU_framebuffer_clear_color(dfbl->default_fb, clear_color);
    }
    GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));
  }

  update_sample_table();

  combined_final_tx_ = combined_final_tx;

  data_.display_only = false;
  inst_.uniform_data.push_update();

  inst_.manager->submit(accumulate_ps_, view);
  inst_.manager->submit(copy_ps_, view);

  /* Ping-pong: next frame reads what was just written. */
  combined_tx_.swap();
  weight_tx_.swap();

  /* Use history after first sample. */
  if (data_.use_history == 0) {
    data_.use_history = 1;
  }
}
799
/* NOTE(review): the signature line was lost during extraction; from the body this is the
 * viewport-only display path that re-presents the already-accumulated film without
 * adding a new sample. */
{
  BLI_assert(inst_.is_viewport());

  /* Acquire dummy render buffers for correct binding. They will not be used. */
  inst_.render_buffers.acquire(int2(1));

  /* NOTE(review): original lines were lost during extraction here — presumably the
   * declaration of `dfbl` (default framebuffer list) used below; confirm. */
  GPU_framebuffer_viewport_set(dfbl->default_fb, UNPACK2(data_.offset), UNPACK2(data_.extent));

  combined_final_tx_ = inst_.render_buffers.combined_tx;

  /* Display-only: the shader reads history without accumulating into it. */
  data_.display_only = true;
  inst_.uniform_data.push_update();

  draw::View drw_view("MainView", DRW_view_default_get());

  DRW_manager_get()->submit(accumulate_ps_, drw_view);

  inst_.render_buffers.release();

  /* IMPORTANT: Do not swap! No accumulation has happened. */
}
824
/* NOTE(review): the signature line was lost during extraction; from the body this simply
 * submits the cryptomatte sample-sorting compute pass built in the end-of-sync setup. */
{
  DRW_manager_get()->submit(cryptomatte_post_ps_);
}
829
float *Film::read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset)
{
  /* Read an accumulated pass back to the CPU. The returned buffer is owned by the caller. */
  GPUTexture *pass_tx = this->get_pass_texture(pass_type, layer_offset);

  /* NOTE(review): one original line was lost during extraction here — presumably a
   * null-check/assert on `pass_tx` before the read-back; confirm. */

  float *result = (float *)GPU_texture_read(pass_tx, GPU_DATA_FLOAT, 0);

  if (pass_is_float3(pass_type)) {
    /* Convert result in place as we cannot do this conversion on GPU. */
    /* Packs RGBA texels down to tightly packed float3; writing index `px` always trails
     * reading index `px * 4`, so the in-place copy is safe. */
    for (const int px : IndexRange(GPU_texture_width(pass_tx) * GPU_texture_height(pass_tx))) {
      *(reinterpret_cast<float3 *>(result) + px) = *(reinterpret_cast<float3 *>(result + px * 4));
    }
  }

  return result;
}
847
848GPUTexture *Film::get_pass_texture(eViewLayerEEVEEPassType pass_type, int layer_offset)
849{
850 ePassStorageType storage_type = pass_storage_type(pass_type);
851 const bool is_value = storage_type == PASS_STORAGE_VALUE;
852 const bool is_cryptomatte = storage_type == PASS_STORAGE_CRYPTOMATTE;
853
854 Texture &accum_tx = (pass_type == EEVEE_RENDER_PASS_COMBINED) ?
855 combined_tx_.current() :
856 (pass_type == EEVEE_RENDER_PASS_Z) ?
857 depth_tx_ :
858 (is_cryptomatte ? cryptomatte_tx_ :
859 (is_value ? value_accum_tx_ : color_accum_tx_));
860
861 int index = pass_id_get(pass_type);
862 if (index == -1) {
863 return nullptr;
864 }
865
866 accum_tx.ensure_layer_views();
867 return accum_tx.layer_view(index + layer_offset);
868}
869
/* NOTE(review): the signature line was lost during extraction; from the body this returns
 * `bool`: true only for viewport instances with the realtime compositor active. */
{
  return inst_.is_viewport() && DRW_is_viewport_compositor_enabled();
}
874
/* Gets the appropriate shader to write the given pass type. This is because passes of different
 * types are stored in different textures types and formats. */
/* NOTE(review): the signature line was lost during extraction; from the call site it
 * returns an `eShaderType` and takes an `eViewLayerEEVEEPassType pass_type`; confirm. */
{
  switch (pass_type) {
    /* NOTE(review): the case labels/returns for the special-cased pass types were lost
     * during extraction here; confirm against the original file. */
    default:
      break;
  }

  switch (Film::pass_storage_type(pass_type)) {
    /* NOTE(review): the per-storage-type cases/returns were lost during extraction here;
     * confirm against the original file. */
  }

  /* NOTE(review): the trailing fallback return was lost during extraction. */
}
899
/* Gets the appropriate shader to write the given AOV pass. */
/* NOTE(review): the signature line was lost during extraction; from the call site it
 * returns an `eShaderType` and takes a `ViewLayerAOV *aov`; confirm. */
{
  switch (aov->type) {
    case AOV_TYPE_VALUE:
      /* NOTE(review): the return statement for the value case was lost during extraction. */
    case AOV_TYPE_COLOR:
      /* NOTE(review): the return statement for the color case was lost during extraction. */
  }

  /* NOTE(review): the trailing fallback return was lost during extraction. */
}
912
/* NOTE(review): the signature line was lost during extraction; from the body this writes
 * every pass needed by the viewport compositor into viewport pass textures. */
{
  /* Cryptomatte samples must be sorted before they are consumed. */
  this->cryptomatte_sort();

  /* Write standard passes. */
  for (const int i : IndexRange(EEVEE_RENDER_PASS_MAX_BIT + 1)) {
    /* NOTE(review): the first half of this declaration was lost during extraction —
     * presumably `const eViewLayerEEVEEPassType pass_type = eViewLayerEEVEEPassType(`
     * given the cast closing on the next line; confirm. */
        viewport_compositor_enabled_passes_ & (1 << i));
    if (pass_type == 0) {
      continue;
    }

    /* The compositor will use the viewport color texture as the combined pass because the viewport
     * texture will include Grease Pencil, so no need to write the combined pass from the engine
     * side. */
    if (pass_type == EEVEE_RENDER_PASS_COMBINED) {
      continue;
    }

    Vector<std::string> pass_names = Film::pass_to_render_pass_names(pass_type, inst_.view_layer);
    for (const int64_t pass_offset : IndexRange(pass_names.size())) {
      GPUTexture *pass_texture = this->get_pass_texture(pass_type, pass_offset);
      if (!pass_texture) {
        continue;
      }

      /* Allocate passes that spans the entire display extent, even when border rendering, then
       * copy the border region while zeroing the rest. That's because the compositor doesn't have
       * a distinction between display and data windows at the moment, so it expects passes to have
       * the extent of the viewport. Furthermore, we still do not support passes from Cycles and
       * external engines, so the viewport size assumption holds at the compositor side to support
       * all cases for now. */
      const char *pass_name = pass_names[pass_offset].c_str();
      draw::TextureFromPool &output_pass_texture = DRW_viewport_pass_texture_get(pass_name);
      output_pass_texture.acquire(this->display_extent, GPU_texture_format(pass_texture));

      /* One-shot compute pass copying the film pass into the compositor-facing texture. */
      PassSimple write_pass_ps = {"Film.WriteViewportCompositorPass"};
      const eShaderType write_shader_type = get_write_pass_shader_type(pass_type);
      write_pass_ps.shader_set(inst_.shaders.static_shader_get(write_shader_type));
      write_pass_ps.push_constant("offset", data_.offset);
      write_pass_ps.bind_texture("input_tx", pass_texture);
      write_pass_ps.bind_image("output_img", output_pass_texture);
      write_pass_ps.barrier(GPU_BARRIER_TEXTURE_FETCH);
      write_pass_ps.dispatch(math::divide_ceil(this->display_extent, int2(FILM_GROUP_SIZE)));
      inst_.manager->submit(write_pass_ps);
    }
  }

  /* Write AOV passes. */
  LISTBASE_FOREACH (ViewLayerAOV *, aov, &inst_.view_layer->aovs) {
    /* Skip AOVs whose name clashes with another pass. */
    if ((aov->flag & AOV_CONFLICT) != 0) {
      continue;
    }
    GPUTexture *pass_texture = this->get_aov_texture(aov);
    if (!pass_texture) {
      continue;
    }

    /* See above comment regarding the allocation extent. */
    draw::TextureFromPool &output_pass_texture = DRW_viewport_pass_texture_get(aov->name);
    output_pass_texture.acquire(this->display_extent, GPU_texture_format(pass_texture));

    PassSimple write_pass_ps = {"Film.WriteViewportCompositorPass"};
    const eShaderType write_shader_type = get_aov_write_pass_shader_type(aov);
    write_pass_ps.shader_set(inst_.shaders.static_shader_get(write_shader_type));
    write_pass_ps.push_constant("offset", data_.offset);
    write_pass_ps.bind_texture("input_tx", pass_texture);
    write_pass_ps.bind_image("output_img", output_pass_texture);
    write_pass_ps.barrier(GPU_BARRIER_TEXTURE_FETCH);
    write_pass_ps.dispatch(math::divide_ceil(this->display_extent, int2(FILM_GROUP_SIZE)));
    inst_.manager->submit(write_pass_ps);
  }
}
986
988
989} // namespace blender::eevee
int BKE_render_preview_pixel_size(const RenderData *r)
Definition scene.cc:2892
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
BLI_INLINE unsigned int BLI_hash_string(const char *str)
Definition BLI_hash.h:71
void * BLI_findstring(const struct ListBase *listbase, const char *id, int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
#define LISTBASE_FOREACH(type, var, list)
MINLINE uint divide_ceil_u(uint a, uint b)
MINLINE float pow2f(float x)
MINLINE float clamp_f(float value, float min, float max)
MINLINE int square_i(int a)
MINLINE float square_f(float a)
#define M_SQRT1_2
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition BLI_rect.h:193
void BLI_rcti_init(struct rcti *rect, int xmin, int xmax, int ymin, int ymax)
Definition rct.c:418
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition BLI_rect.h:189
bool BLI_rcti_is_empty(const struct rcti *rect)
unsigned int uint
#define UNPACK2(a)
#define SET_FLAG_FROM_TEST(value, test, flag)
@ VIEW_LAYER_CRYPTOMATTE_MATERIAL
@ VIEW_LAYER_CRYPTOMATTE_ASSET
@ VIEW_LAYER_CRYPTOMATTE_OBJECT
#define EEVEE_RENDER_PASS_MAX_BIT
eViewLayerEEVEEPassType
@ EEVEE_RENDER_PASS_UNUSED_8
@ EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL
@ EEVEE_RENDER_PASS_AO
@ EEVEE_RENDER_PASS_NORMAL
@ EEVEE_RENDER_PASS_UNUSED_14
@ EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT
@ EEVEE_RENDER_PASS_DIFFUSE_LIGHT
@ EEVEE_RENDER_PASS_VOLUME_LIGHT
@ EEVEE_RENDER_PASS_AOV
@ EEVEE_RENDER_PASS_DIFFUSE_COLOR
@ EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET
@ EEVEE_RENDER_PASS_Z
@ EEVEE_RENDER_PASS_ENVIRONMENT
@ EEVEE_RENDER_PASS_COMBINED
@ EEVEE_RENDER_PASS_SPECULAR_LIGHT
@ EEVEE_RENDER_PASS_VECTOR
@ EEVEE_RENDER_PASS_SPECULAR_COLOR
@ EEVEE_RENDER_PASS_EMIT
@ EEVEE_RENDER_PASS_MIST
@ EEVEE_RENDER_PASS_TRANSPARENT
@ EEVEE_RENDER_PASS_SHADOW
@ EEVEE_RENDER_PASS_POSITION
@ AOV_TYPE_COLOR
@ AOV_TYPE_VALUE
@ AOV_CONFLICT
struct ViewLayerAOV ViewLayerAOV
@ R_ALPHAPREMUL
@ R_MBLUR
@ WO_MIST_QUADRATIC
@ WO_MIST_INVERSE_QUADRATIC
@ WO_MIST_LINEAR
static AppView * view
void GPU_framebuffer_bind(GPUFrameBuffer *framebuffer)
void GPU_framebuffer_clear_color(GPUFrameBuffer *fb, const float clear_col[4])
void GPU_framebuffer_viewport_set(GPUFrameBuffer *framebuffer, int x, int y, int width, int height)
@ GPU_DRIVER_ANY
@ GPU_OS_MAC
@ GPU_DEVICE_INTEL
bool GPU_type_matches(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver)
@ GPU_PRIM_TRIS
void GPU_memory_barrier(eGPUBarrier barrier)
Definition gpu_state.cc:374
@ GPU_BARRIER_TEXTURE_FETCH
Definition GPU_state.hh:37
@ GPU_BARRIER_SHADER_IMAGE_ACCESS
Definition GPU_state.hh:35
@ GPU_BARRIER_TEXTURE_UPDATE
Definition GPU_state.hh:39
int GPU_texture_height(const GPUTexture *texture)
int GPU_texture_width(const GPUTexture *texture)
void * GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level)
@ GPU_DATA_FLOAT
eGPUTextureFormat
@ GPU_SAMPLER_FILTERING_LINEAR
eGPUTextureFormat GPU_texture_format(const GPUTexture *texture)
#define Z
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Gabor Generate Gabor noise Gradient Generate interpolated color and intensity values based on the input vector Magic Generate a psychedelic color texture Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a or normal between world
Group Output data from inside of a node group A color picker Mix two input colors RGB to Convert a color s luminance to a grayscale value NORMAL
struct GPUShader GPUShader
void reset()
clear internal cached data and reset random seed
bool contains(const Key &key) const
Definition BLI_set.hh:291
int64_t size() const
void append(const T &value)
const T & last(const int64_t n=0) const
bool is_empty() const
bool contains(const Key &key) const
Definition BLI_set.hh:291
int64_t size() const
void submit(PassSimple &pass, View &view)
void acquire(int2 extent, eGPUTextureFormat format, eGPUTextureUsage usage=GPU_TEXTURE_USAGE_GENERAL)
bool ensure_layer_views(bool cube_as_array=false)
GPUTexture * layer_view(int layer)
void bind_texture(const char *name, GPUTexture *texture, GPUSamplerState state=sampler_auto)
void bind_resources(U &resources)
Definition draw_pass.hh:426
void bind_image(const char *name, GPUTexture *image)
void specialize_constant(GPUShader *shader, const char *name, const float &data)
void dispatch(int group_len)
Definition draw_pass.hh:874
void barrier(eGPUBarrier type)
Definition draw_pass.hh:943
void state_set(DRWState state, int clip_plane_count=0)
Definition draw_pass.hh:954
void bind_ubo(const char *name, GPUUniformBuf *buffer)
void push_constant(const char *name, const float &data)
void shader_set(GPUShader *shader)
Definition draw_pass.hh:971
const CameraData & data_get() const
float2 pixel_jitter_get() const
int pass_id_get(eViewLayerEEVEEPassType pass_type) const
static bool pass_is_float3(eViewLayerEEVEEPassType pass_type)
GPUTexture * get_aov_texture(ViewLayerAOV *aov)
void accumulate(View &view, GPUTexture *combined_final_tx)
static const Vector< std::string > pass_to_render_pass_names(eViewLayerEEVEEPassType pass_type, const ViewLayer *view_layer)
int cryptomatte_layer_max_get() const
int cryptomatte_layer_len_get() const
static ePassStorageType pass_storage_type(eViewLayerEEVEEPassType pass_type)
float * read_pass(eViewLayerEEVEEPassType pass_type, int layer_offset)
void write_viewport_compositor_passes()
static int overscan_pixels_get(float overscan, int2 extent)
bool is_viewport_compositor_enabled() const
AOVsInfoDataBuf aovs_info
Definition eevee_film.hh:53
void init(const int2 &full_extent, const rcti *output_rect)
float * read_aov(ViewLayerAOV *aov)
Definition eevee_film.cc:98
GPUTexture * get_pass_texture(eViewLayerEEVEEPassType pass_type, int layer_offset)
eViewLayerEEVEEPassType enabled_passes_get() const
static constexpr bool use_box_filter
Definition eevee_film.hh:55
UniformDataModule uniform_data
static float2 sample_disk(const float2 &rand)
uint64_t sample_count() const
static float2 sample_spiral(const float2 &rand)
local_group_size(16, 16) .push_constant(Type b
additional_info("compositor_sum_float_shared") .push_constant(Type additional_info("compositor_sum_float_shared") .push_constant(Type GPU_RGBA32F
#define ceilf(x)
#define offsetof(t, d)
#define fabsf(x)
blender::draw::Manager * DRW_manager_get()
DefaultFramebufferList * DRW_viewport_framebuffer_list_get()
blender::draw::TextureFromPool & DRW_viewport_pass_texture_get(const char *pass_name)
DefaultTextureList * DRW_viewport_texture_list_get()
bool DRW_is_viewport_compositor_enabled()
const DRWView * DRW_view_default_get()
@ DRW_STATE_NO_DRAW
Definition draw_state.hh:27
@ DRW_STATE_WRITE_DEPTH
Definition draw_state.hh:29
@ DRW_STATE_WRITE_COLOR
Definition draw_state.hh:30
@ DRW_STATE_DEPTH_ALWAYS
Definition draw_state.hh:36
#define FILM_GROUP_SIZE
#define ENABLE_FROM_LEGACY(name_legacy, name_eevee)
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
#define AOV_MAX
#define FILM_PRECOMP_SAMPLE_MAX
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
Set< std::string > get_used_passes(const Scene &scene, const ViewLayer *view_layer)
detail::Pass< command::DrawCommandBuf > PassSimple
@ FILM_PASS_CONVERT_CRYPTOMATTE
static eShaderType get_write_pass_shader_type(eViewLayerEEVEEPassType pass_type)
static float film_filter_weight(float filter_radius, float sample_distance_sqr)
bool operator!=(const CameraData &a, const CameraData &b)
static eShaderType get_aov_write_pass_shader_type(const ViewLayerAOV *aov)
static eViewLayerEEVEEPassType enabled_passes(const ViewLayer *view_layer)
bool operator==(const CameraData &a, const CameraData &b)
static eViewLayerEEVEEPassType get_viewport_compositor_enabled_passes(const Set< std::string > &viewport_compositor_needed_passes, const ViewLayer *view_layer)
T length_squared(const VecBase< T, Size > &a)
MatBase< T, NumCol, NumRow > scale(const MatBase< T, NumCol, NumRow > &mat, const VectorT &scale)
T floor(const T &a)
VecBase< T, Size > divide_ceil(const VecBase< T, Size > &a, const VecBase< T, Size > &b)
void index(const bNode &, void *r_value)
VecBase< uint32_t, 4 > uint4
VecBase< float, 4 > float4
VecBase< int32_t, 2 > int2
VecBase< float, 2 > float2
VecBase< int32_t, 3 > int3
VecBase< float, 3 > float3
#define hash
Definition noise.c:154
#define FLT_MAX
Definition stdcycles.h:14
__int64 int64_t
Definition stdint.h:89
unsigned __int64 uint64_t
Definition stdint.h:90
GPUFrameBuffer * default_fb
ColorManagedViewSettings view_settings
struct RenderData r
struct World * world
struct ViewLayerEEVEE eevee
short cryptomatte_flag
FilmSample samples[FILM_PRECOMP_SAMPLE_MAX]
float x
int ymin
int xmin