Blender V4.5
draw_command.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2022 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include "GPU_batch.hh"
10#include "GPU_capabilities.hh"
11#include "GPU_compute.hh"
12#include "GPU_debug.hh"
13
14#include "draw_command.hh"
15#include "draw_pass.hh"
16#include "draw_shader.hh"
17#include "draw_view.hh"
18
19#include <bitset>
20#include <sstream>
21
23
24static gpu::Batch *procedural_batch_get(GPUPrimType primitive)
25{
26 switch (primitive) {
27 case GPU_PRIM_POINTS:
29 case GPU_PRIM_LINES:
31 case GPU_PRIM_TRIS:
35 default:
36 /* Add new one as needed. */
38 return nullptr;
39 }
40}
41
42/* -------------------------------------------------------------------- */
45
/* NOTE(review): the signature line was lost in extraction; from the body this appears
 * to be `ShaderBind::execute(RecordingState &state)` — confirm against upstream.
 * Binds `shader` when it differs from the currently recorded one, or unconditionally
 * when the shader declares specialization constants (constant state may differ even
 * for the same shader object). */
47{
48 state.shader_use_specialization = !GPU_shader_get_default_constant_state(shader).is_empty();
49 if (assign_if_different(state.shader, shader) || state.shader_use_specialization) {
50 GPU_shader_bind(shader, state.specialization_constants_get());
51 }
52 /* Signal that we can reload the default for a different specialization later on.
53 * However, we keep the specialization_constants state around for compute shaders. */
54 state.specialization_constants_in_use = false;
55}
56
61
/* NOTE(review): signature lost in extraction; from the body this is presumably
 * `SubpassTransition::execute(...)`. The initializer list of `states` (9 entries:
 * likely depth + 8 color attachments, original lines 69-77) was also dropped —
 * reconstruct from upstream before compiling. */
63{
64 /* TODO(fclem): Require framebuffer bind to always be part of the pass so that we can track it
65 * inside RecordingState. */
66 GPUFrameBuffer *framebuffer = GPU_framebuffer_active_get();
67 /* Unpack to the real enum type. */
68 const GPUAttachmentState states[9] = {
78 };
79 GPU_framebuffer_subpass_transition_array(framebuffer, states, ARRAY_SIZE(states));
80}
81
114
135
/* NOTE(review): signature lost in extraction; presumably
 * `SpecializeConstant::execute(RecordingState &state)`. The `case Type::...:` labels
 * of the switch were also dropped; from the accessed members they are most likely
 * IntValue / IntReference / UintValue / UintReference / FloatValue / FloatReference /
 * BoolValue / BoolReference (matching SpecializeConstant::serialize) — confirm upstream.
 * Lazily copies the shader's default constant state on first use, then overwrites the
 * constant at `location`. */
137{
138 /* All specialization constants should exist as they are not optimized out like uniforms. */
139 BLI_assert(location != -1);
140
141 if (state.specialization_constants_in_use == false) {
142 state.specialization_constants = GPU_shader_get_default_constant_state(this->shader);
143 state.specialization_constants_in_use = true;
144 }
145
146 switch (type) {
148 state.specialization_constants.set_value(location, int_value);
149 break;
151 state.specialization_constants.set_value(location, *int_ref);
152 break;
154 state.specialization_constants.set_value(location, uint_value);
155 break;
157 state.specialization_constants.set_value(location, *uint_ref);
158 break;
160 state.specialization_constants.set_value(location, float_value);
161 break;
163 state.specialization_constants.set_value(location, *float_ref);
164 break;
166 state.specialization_constants.set_value(location, bool_value);
167 break;
169 state.specialization_constants.set_value(location, *bool_ref);
170 break;
171 }
172}
173
/* NOTE(review): signature lost in extraction; presumably `Draw::execute(RecordingState &state)`.
 * The branch condition selecting the expanded path (original line ~190-191, likely testing
 * `expand_prim_type`), the `GPU_batch_draw_expanded_parameter_get(...)` call computing
 * `expanded_range` (lines 193-198), and the two `GPU_batch_draw_advanced(` call openings
 * (lines 209 and 215) were dropped — confirm upstream. */
175{
176 state.front_facing_set(handle.has_inverted_handedness());
177
/* Without GPU-side draw parameters, the resource-id buffer must be attached to the batch. */
178 if (GPU_shader_draw_parameters_support() == false) {
179 GPU_batch_resource_id_buf_set(batch, state.resource_id_buf);
180 }
181
182 /* Use same logic as in `finalize_commands`. */
183 uint instance_first = 0;
184 if (handle.raw > 0) {
185 instance_first = state.instance_offset;
186 state.instance_offset += instance_len;
187 }
188
190
192 /* Expanded draw-call. */
194 batch->prim_type,
199
200 if (expanded_range.is_empty()) {
201 /* Nothing to draw, and can lead to asserts in GPU_batch_bind_as_resources. */
202 return;
203 }
204
/* Original batch is only read as SSBO/texture; a procedural batch issues the draw. */
205 GPU_batch_bind_as_resources(batch, state.shader, state.specialization_constants_get());
206
207 gpu::Batch *gpu_batch = procedural_batch_get(GPUPrimType(expand_prim_type));
208 GPU_batch_set_shader(gpu_batch, state.shader, state.specialization_constants_get());
210 gpu_batch, expanded_range.start(), expanded_range.size(), instance_first, instance_len);
211 }
212 else {
213 /* Regular draw-call. */
214 GPU_batch_set_shader(batch, state.shader, state.specialization_constants_get());
216 }
217}
218
/* NOTE(review): signature lost in extraction; presumably `DrawMulti::execute(RecordingState &state)`.
 * Lines 231, 233, 235 (the expansion branch: condition on `expand_prim_type`, the
 * `GPU_batch_bind_as_resources(` opening, and the procedural-batch substitution) were
 * dropped, so the brace structure around line 229-237 cannot be fully recovered here —
 * confirm upstream. Walks the linked list of draw groups and issues the two reserved
 * indirect commands per group (back-facing first, then front-facing). */
220{
221 DrawMultiBuf::DrawCommandBuf &indirect_buf = multi_draw_buf->command_buf_;
222 DrawMultiBuf::DrawGroupBuf &groups = multi_draw_buf->group_buf_;
223
224 uint group_index = this->group_first;
225 while (group_index != uint(-1)) {
226 const DrawGroup &group = groups[group_index];
227
228 if (group.vertex_len > 0) {
229 gpu::Batch *batch = group.desc.gpu_batch;
230
232 /* Bind original batch as resource and use a procedural batch to issue the draw-call. */
234 group.desc.gpu_batch, state.shader, state.specialization_constants_get());
236 }
237
238 if (GPU_shader_draw_parameters_support() == false) {
239 GPU_batch_resource_id_buf_set(batch, state.resource_id_buf);
240 }
241
242 GPU_batch_set_shader(batch, state.shader, state.specialization_constants_get());
243
244 constexpr intptr_t stride = sizeof(DrawCommand);
245 /* We have 2 indirect command reserved per draw group. */
246 intptr_t offset = stride * group_index * 2;
247
248 /* Draw negatively scaled geometry first. */
249 if (group.len - group.front_facing_len > 0) {
250 state.front_facing_set(true);
251 GPU_batch_draw_indirect(batch, indirect_buf, offset);
252 }
253
254 if (group.front_facing_len > 0) {
255 state.front_facing_set(false);
256 GPU_batch_draw_indirect(batch, indirect_buf, offset + stride);
257 }
258 }
259
260 group_index = group.next;
261 }
262}
263
/* NOTE(review): signature lost in extraction; presumably
 * `DrawIndirect::execute(RecordingState &state)`. The actual draw line (original
 * line 268, presumably a `GPU_batch_draw_indirect(...)` call) was dropped — confirm
 * upstream. Only the handedness-dependent front-face flip survives here. */
265{
266 state.front_facing_set(handle.has_inverted_handedness());
267
269}
270
/* NOTE(review): signature lost in extraction; presumably
 * `Dispatch::execute(RecordingState &state)`. The two `GPU_compute_dispatch(` call
 * openings (original lines 274 and 278) were dropped; only their argument lines
 * survive — confirm upstream. Dispatches with a referenced or inline group count. */
272{
273 if (is_reference) {
275 state.shader, size_ref->x, size_ref->y, size_ref->z, state.specialization_constants_get());
276 }
277 else {
279 state.shader, size.x, size.y, size.z, state.specialization_constants_get());
280 }
281}
282
/* NOTE(review): signature lost in extraction; presumably
 * `DispatchIndirect::execute(RecordingState &state)`. Dispatches the bound compute
 * shader with group counts read from `indirect_buf` on the GPU. */
284{
285 GPU_compute_dispatch_indirect(state.shader, *indirect_buf, state.specialization_constants_get());
286}
287
/* NOTE(review): both the signature and the single body line (original line 290) were
 * lost in extraction. Position between DispatchIndirect::execute and the clear
 * commands suggests `Barrier::execute`, whose body is presumably a
 * `GPU_memory_barrier(...)` call — confirm upstream before relying on this. */
289{
291}
292
298
/* NOTE(review): signature lost in extraction; from the body this is presumably
 * `ClearMulti::execute(...)`. Note also that an entire function (original lines
 * 293-297, likely `Clear::execute`) was collapsed out before this one.
 * Clears every color attachment of the active framebuffer with per-attachment colors. */
300{
301 GPUFrameBuffer *fb = GPU_framebuffer_active_get();
302 GPU_framebuffer_multi_clear(fb, (const float(*)[4])colors);
303}
304
/* Apply the pipeline state and clip-plane count to the GPU, skipping all work when
 * nothing changed since the last recorded state.
 * NOTE(review): several interior lines were lost in extraction: the main
 * `GPU_state_set(...)` call with its `to_*` converters (original lines 314-320), the
 * conditions and bodies around lines 322-329 (likely DRW_STATE_PROGRAM_POINT_SIZE /
 * GPU_program_point_size handling), line 339 (likely the DRW_STATE_IN_FRONT_SELECT
 * test guarding the depth-range hack), and lines 350-354 (likely
 * DRW_STATE_CLIP_CONTROL_UNIT_RANGE / GPU_clip_control_unit_range) — confirm upstream. */
305void StateSet::execute(RecordingState &recording_state) const
306{
307 bool state_changed = assign_if_different(recording_state.pipeline_state, new_state);
308 bool clip_changed = assign_if_different(recording_state.clip_plane_count, clip_plane_count);
309
310 if (!state_changed && !clip_changed) {
311 return;
312 }
313
321
324 }
325 else {
327 }
328
330 GPU_shadow_offset(true);
331 }
332 else {
333 GPU_shadow_offset(false);
334 }
335
336 /* TODO: this should be part of shader state. */
337 GPU_clip_distances(recording_state.clip_plane_count);
338
340 /* XXX `GPU_depth_range` is not a perfect solution
341 * since very distant geometries can still be occluded.
342 * Also the depth test precision of these geometries is impaired.
343 * However, it solves the selection for the vast majority of cases. */
344 GPU_depth_range(0.0f, 0.01f);
345 }
346 else {
347 GPU_depth_range(0.0f, 1.0f);
348 }
349
352 }
353 else {
355 }
356}
357
/* NOTE(review): signature lost in extraction; presumably the static
 * `StateSet::set(DRWState state)` helper. The texture/buffer unbind calls after the
 * feedback-loop comment (original lines 365-368, likely GPU_texture_unbind_all /
 * GPU_texture_image_unbind_all / GPU_uniformbuf_debug_unbind_all /
 * GPU_storagebuf_debug_unbind_all) and line 371 (likely GPU_point_size) were
 * dropped — confirm upstream. Resets GPU state using a throwaway RecordingState. */
359{
360 RecordingState recording_state;
361 StateSet{state, 0}.execute(recording_state);
362
363 /* This function is used for cleaning the state for the viewport drawing.
364 * Make sure to reset textures resources to avoid feedback loop when rendering (see #131652). */
369
370 /* Remnant of the legacy draw manager. Kept to avoid regression, but might become unneeded. */
372 GPU_line_smooth(false);
373 GPU_line_width(0.0f);
374}
375
382
384
385/* -------------------------------------------------------------------- */
388
389std::string ShaderBind::serialize() const
390{
391 return std::string(".shader_bind(") + GPU_shader_get_name(shader) + ")";
392}
393
394std::string FramebufferBind::serialize() const
395{
396 return std::string(".framebuffer_bind(") +
397 (*framebuffer == nullptr ? "nullptr" : GPU_framebuffer_get_name(*framebuffer)) + ")";
398}
399
/* NOTE(review): signature lost in extraction; from the printed keyword this is
 * presumably `SubpassTransition::serialize()`. Prints the depth and the eight color
 * attachment transition states as "write" / "read" / "ignore". */
401{
402 auto to_str = [](GPUAttachmentState state) {
403 return (state != GPU_ATTACHMENT_IGNORE) ?
404 ((state == GPU_ATTACHMENT_WRITE) ? "write" : "read") :
405 "ignore";
406 };
407
408 return std::string(".subpass_transition(\n") +
409 "depth=" + to_str(GPUAttachmentState(depth_state)) + ",\n" +
410 "color0=" + to_str(GPUAttachmentState(color_states[0])) + ",\n" +
411 "color1=" + to_str(GPUAttachmentState(color_states[1])) + ",\n" +
412 "color2=" + to_str(GPUAttachmentState(color_states[2])) + ",\n" +
413 "color3=" + to_str(GPUAttachmentState(color_states[3])) + ",\n" +
414 "color4=" + to_str(GPUAttachmentState(color_states[4])) + ",\n" +
415 "color5=" + to_str(GPUAttachmentState(color_states[5])) + ",\n" +
416 "color6=" + to_str(GPUAttachmentState(color_states[6])) + ",\n" +
417 "color7=" + to_str(GPUAttachmentState(color_states[7])) + "\n)";
418}
419
/* Human readable representation of a resource bind for pass debug printing.
 * NOTE(review): several `case Type::...:` labels were lost in extraction (before
 * lines 427, 439, 442, 445 — from the printed strings, presumably the
 * vertbuf-as-texture, uniform-as-ssbo, vertbuf-as-ssbo and indexbuf-as-ssbo
 * variants), as was the statement before the default `return ""` (line 448,
 * presumably BLI_assert_unreachable()) — confirm upstream. */
420std::string ResourceBind::serialize() const
421{
422 switch (type) {
423 case Type::Sampler:
424 return std::string(".bind_texture") + (is_reference ? "_ref" : "") + "(" +
425 std::to_string(slot) + ", sampler=" + sampler.to_string() + ")";
427 return std::string(".bind_vertbuf_as_texture") + (is_reference ? "_ref" : "") + "(" +
428 std::to_string(slot) + ")";
429 case Type::Image:
430 return std::string(".bind_image") + (is_reference ? "_ref" : "") + "(" +
431 std::to_string(slot) + ")";
432 case Type::UniformBuf:
433 return std::string(".bind_uniform_buf") + (is_reference ? "_ref" : "") + "(" +
434 std::to_string(slot) + ")";
435 case Type::StorageBuf:
436 return std::string(".bind_storage_buf") + (is_reference ? "_ref" : "") + "(" +
437 std::to_string(slot) + ")";
439 return std::string(".bind_uniform_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
440 std::to_string(slot) + ")";
442 return std::string(".bind_vertbuf_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
443 std::to_string(slot) + ")";
445 return std::string(".bind_indexbuf_as_ssbo") + (is_reference ? "_ref" : "") + "(" +
446 std::to_string(slot) + ")";
447 default:
449 return "";
450 }
451}
452
/* Human readable representation of a push-constant upload for pass debug printing.
 * Prints `array_len` comma-separated values, switching on component count (1/2/3/4/16)
 * and value type.
 * NOTE(review): the reference-variant `case Type::...Reference:` labels (before lines
 * 464, 470, 480, 486, 496, 502, 512, 518, 533) and the 16-component int body
 * (lines 525-526) were lost in extraction — confirm upstream. */
453std::string PushConstant::serialize() const
454{
455 std::stringstream ss;
456 for (int i = 0; i < array_len; i++) {
457 switch (comp_len) {
458 case 1:
459 switch (type) {
460 case Type::IntValue:
461 ss << int1_value;
462 break;
464 ss << int_ref[i];
465 break;
466 case Type::FloatValue:
467 ss << float1_value;
468 break;
470 ss << float_ref[i];
471 break;
472 }
473 break;
474 case 2:
475 switch (type) {
476 case Type::IntValue:
477 ss << int2_value;
478 break;
480 ss << int2_ref[i];
481 break;
482 case Type::FloatValue:
483 ss << float2_value;
484 break;
486 ss << float2_ref[i];
487 break;
488 }
489 break;
490 case 3:
491 switch (type) {
492 case Type::IntValue:
493 ss << int3_value;
494 break;
496 ss << int3_ref[i];
497 break;
498 case Type::FloatValue:
499 ss << float3_value;
500 break;
502 ss << float3_ref[i];
503 break;
504 }
505 break;
506 case 4:
507 switch (type) {
508 case Type::IntValue:
509 ss << int4_value;
510 break;
512 ss << int4_ref[i];
513 break;
514 case Type::FloatValue:
515 ss << float4_value;
516 break;
518 ss << float4_ref[i];
519 break;
520 }
521 break;
522 case 16:
523 switch (type) {
524 case Type::IntValue:
527 break;
528 case Type::FloatValue:
/* Reinterpret four consecutive float4 members as the columns of a 4x4 matrix. */
529 ss << float4x4(
530 (&float4_value)[0], (&float4_value)[1], (&float4_value)[2], (&float4_value)[3]);
531 break;
533 ss << *float4x4_ref;
534 break;
535 }
536 break;
537 }
538 if (i < array_len - 1) {
539 ss << ", ";
540 }
541 }
542
543 return std::string(".push_constant(") + std::to_string(location) + ", data=" + ss.str() + ")";
544}
545
/* NOTE(review): signature lost in extraction; from the printed keyword this is
 * presumably `SpecializeConstant::serialize()`. The reference-variant case labels
 * (before lines 563, 566, 569, 572 — presumably IntReference / UintReference /
 * FloatReference / BoolReference) were also dropped — confirm upstream. */
547{
548 std::stringstream ss;
549 switch (type) {
550 case Type::IntValue:
551 ss << int_value;
552 break;
553 case Type::UintValue:
554 ss << uint_value;
555 break;
556 case Type::FloatValue:
557 ss << float_value;
558 break;
559 case Type::BoolValue:
560 ss << bool_value;
561 break;
563 ss << *int_ref;
564 break;
566 ss << *uint_ref;
567 break;
569 ss << *float_ref;
570 break;
572 ss << *bool_ref;
573 break;
574 }
575 return std::string(".specialize_constant(") + std::to_string(location) + ", data=" + ss.str() +
576 ")";
577}
578
579std::string Draw::serialize() const
580{
581 std::string inst_len = std::to_string(instance_len);
582 std::string vert_len = (vertex_len == uint(-1)) ? "from_batch" : std::to_string(vertex_len);
583 std::string vert_first = (vertex_first == uint(-1)) ? "from_batch" :
584 std::to_string(vertex_first);
585 return std::string(".draw(inst_len=") + inst_len + ", vert_len=" + vert_len +
586 ", vert_first=" + vert_first + ", res_id=" + std::to_string(handle.resource_index()) +
587 ")";
588}
589
/* Human readable representation of a multi-draw for pass debug printing.
 * CPU-side emulation of the GPU command generation: sorts the prototypes per group
 * (stable, unlike the GPU path), recomputes per-group start offsets by prefix sum,
 * then walks the group linked-list printing each group and its prototypes.
 * NOTE(review): two interior lines (original 626 and 638) were lost in extraction —
 * confirm against upstream; the surviving statements read as a complete sequence. */
590std::string DrawMulti::serialize(const std::string &line_prefix) const
591{
592 DrawMultiBuf::DrawGroupBuf &groups = multi_draw_buf->group_buf_;
593
594 MutableSpan<DrawPrototype> prototypes(multi_draw_buf->prototype_buf_.data(),
595 multi_draw_buf->prototype_count_);
596
597 /* This emulates the GPU sorting but without the unstable draw order. */
598 std::sort(
599 prototypes.begin(), prototypes.end(), [](const DrawPrototype &a, const DrawPrototype &b) {
600 return (a.group_id < b.group_id) ||
601 (a.group_id == b.group_id && a.res_handle > b.res_handle);
602 });
603
604 /* Compute prefix sum to have correct offsets. */
605 uint prefix_sum = 0u;
606 for (DrawGroup &group : groups) {
607 group.start = prefix_sum;
608 prefix_sum += group.front_facing_counter + group.back_facing_counter;
609 }
610
611 std::stringstream ss;
612
613 uint group_len = 0;
614 uint group_index = this->group_first;
615 while (group_index != uint(-1)) {
616 const DrawGroup &grp = groups[group_index];
617
618 ss << std::endl << line_prefix << " .group(id=" << group_index << ", len=" << grp.len << ")";
619
620 intptr_t offset = grp.start;
621
622 if (grp.back_facing_counter > 0) {
623 for (DrawPrototype &proto : prototypes.slice_safe({offset, grp.back_facing_counter})) {
624 BLI_assert(proto.group_id == group_index);
625 ResourceHandle handle(proto.res_handle);
627 ss << std::endl
628 << line_prefix << " .proto(instance_len=" << std::to_string(proto.instance_len)
629 << ", resource_id=" << std::to_string(handle.resource_index()) << ", back_face)";
630 }
631 offset += grp.back_facing_counter;
632 }
633
634 if (grp.front_facing_counter > 0) {
635 for (DrawPrototype &proto : prototypes.slice_safe({offset, grp.front_facing_counter})) {
636 BLI_assert(proto.group_id == group_index);
637 ResourceHandle handle(proto.res_handle);
639 ss << std::endl
640 << line_prefix << " .proto(instance_len=" << std::to_string(proto.instance_len)
641 << ", resource_id=" << std::to_string(handle.resource_index()) << ", front_face)";
642 }
643 }
644
645 group_index = grp.next;
646 group_len++;
647 }
648
649 ss << std::endl;
650
651 return line_prefix + ".draw_multi(" + std::to_string(group_len) + ")" + ss.str();
652}
653
654std::string DrawIndirect::serialize() const
655{
656 return std::string(".draw_indirect()");
657}
658
659std::string Dispatch::serialize() const
660{
661 int3 sz = is_reference ? *size_ref : size;
662 return std::string(".dispatch") + (is_reference ? "_ref" : "") + "(" + std::to_string(sz.x) +
663 ", " + std::to_string(sz.y) + ", " + std::to_string(sz.z) + ")";
664}
665
/* NOTE(review): signature lost in extraction; from the printed keyword this is
 * presumably `DispatchIndirect::serialize()`. No CPU-visible parameters to print. */
667{
668 return std::string(".dispatch_indirect()");
669}
670
671std::string Barrier::serialize() const
672{
673 /* TODO(@fclem): Better serialization... */
674 return std::string(".barrier(") + std::to_string(type) + ")";
675}
676
/* Human readable representation of a clear command for pass debug printing.
 * NOTE(review): the guarding conditions (original lines 680, 682, 686, 688, 692 —
 * presumably testing `clear_channels` against GPU_COLOR_BIT / GPU_DEPTH_BIT /
 * GPU_STENCIL_BIT and whether another channel follows) were lost in extraction, which
 * is why the surviving braces appear unbalanced — confirm upstream. */
677std::string Clear::serialize() const
678{
679 std::stringstream ss;
681 ss << "color=" << color;
683 ss << ", ";
684 }
685 }
687 ss << "depth=" << depth;
689 ss << ", ";
690 }
691 }
693 ss << "stencil=0b" << std::bitset<8>(stencil) << ")";
694 }
695 return std::string(".clear(") + ss.str() + ")";
696}
697
/* Human readable representation of a multi-attachment clear for pass debug printing.
 * NOTE(review): the loop header (original line 701, presumably iterating the
 * `colors` span) was lost in extraction — confirm upstream. */
698std::string ClearMulti::serialize() const
699{
700 std::stringstream ss;
702 ss << color << ", ";
703 }
704 return std::string(".clear_multi(colors={") + ss.str() + "})";
705}
706
707std::string StateSet::serialize() const
708{
709 /* TODO(@fclem): Better serialization... */
710 return std::string(".state_set(") + std::to_string(new_state) + ")";
711}
712
713std::string StencilSet::serialize() const
714{
715 std::stringstream ss;
716 ss << ".stencil_set(write_mask=0b" << std::bitset<8>(write_mask) << ", reference=0b"
717 << std::bitset<8>(reference) << ", compare_mask=0b" << std::bitset<8>(compare_mask) << ")";
718 return ss.str();
719}
720
722
723/* -------------------------------------------------------------------- */
726
/* Recursively walk the recorded headers, resolve deferred vertex counts from the now
 * finished batches, and fill `resource_id_buf` with one resource index per instance
 * for every draw that has a handle. `resource_id_count` is the running allocation
 * cursor shared across the recursion.
 * NOTE(review): two call openings were lost in extraction: before line 736 (original
 * line 735) and the `GPU_batch_draw_parameter_get(` opening before line 750
 * (original line 749) — confirm upstream. */
727void DrawCommandBuf::finalize_commands(Vector<Header, 0> &headers,
728 Vector<Undetermined, 0> &commands,
729 SubPassVector &sub_passes,
730 uint &resource_id_count,
731 ResourceIdBuf &resource_id_buf)
732{
733 for (const Header &header : headers) {
734 if (header.type == Type::SubPass) {
736 auto &sub = sub_passes[int64_t(header.index)];
737 finalize_commands(
738 sub.headers_, sub.commands_, sub_passes, resource_id_count, resource_id_buf);
739 }
740
741 if (header.type != Type::Draw) {
742 continue;
743 }
744
745 Draw &cmd = commands[header.index].draw;
746
747 int batch_vert_len, batch_vert_first, batch_base_index, batch_inst_len;
748 /* Now that GPUBatches are guaranteed to be finished, extract their parameters. */
750 cmd.batch, &batch_vert_len, &batch_vert_first, &batch_base_index, &batch_inst_len);
751 /* Instancing attributes are not supported using the new pipeline since we use the base
752 * instance to set the correct resource_id. Workaround is a storage_buf + gl_InstanceID. */
753 BLI_assert(batch_inst_len == 1);
754
755 if (cmd.vertex_len == uint(-1)) {
756 cmd.vertex_len = batch_vert_len;
757 }
758
759 /* NOTE: Only do this if a handle is present. If a draw-call is using instancing with null
760 * handle, the shader should not rely on `resource_id` at ***all***. This allows procedural
761 * instanced draw-calls with lots of instances with no overhead. */
762 /* TODO(fclem): Think about either fixing this feature or removing support for instancing all
763 * together. */
764 if (cmd.handle.raw > 0) {
765 /* Save correct offset to start of resource_id buffer region for this draw. */
766 uint instance_first = resource_id_count;
767 resource_id_count += cmd.instance_len;
768 /* Ensure the buffer is big enough. */
769 resource_id_buf.get_or_resize(resource_id_count - 1);
770
771 /* Copy the resource id for all instances. */
772 uint index = cmd.handle.resource_index();
773 for (int i = instance_first; i < (instance_first + cmd.instance_len); i++) {
774 resource_id_buf[i] = index;
775 }
776 }
777 }
778}
779
/* NOTE(review): the first signature line was lost in extraction; from the trailing
 * parameters and the matching declaration this is presumably
 * `void DrawCommandBuf::generate_commands(Vector<Header, 0> &headers, ...)` —
 * confirm upstream. Seeds slot 0 with the null-handle resource id, then finalizes
 * all recorded draws and uploads the resource-id buffer. */
781 Vector<Undetermined, 0> &commands,
782 SubPassVector &sub_passes)
783{
784 /* First instance ID contains the null handle with identity transform.
785 * This is referenced for draw-calls with no handle. */
786 resource_id_buf_.get_or_resize(0) = 0;
787 resource_id_count_ = 1;
788 finalize_commands(headers, commands, sub_passes, resource_id_count_, resource_id_buf_);
789 resource_id_buf_.push_update();
790}
791
/* NOTE(review): signature lost in extraction; from the matching declaration this is
 * presumably `void DrawCommandBuf::bind(RecordingState &state)`. Without GPU draw
 * parameters the buffer is recorded for later per-batch attachment; otherwise it is
 * bound directly as an SSBO. */
793{
794 if (GPU_shader_draw_parameters_support() == false) {
795 state.resource_id_buf = resource_id_buf_;
796 }
797 else {
798 GPU_storagebuf_bind(resource_id_buf_, DRW_RESOURCE_ID_SLOT);
799 }
800}
801
/* NOTE(review): the first signature line was lost in extraction; from the trailing
 * parameters and matching declaration this is presumably
 * `void DrawMultiBuf::generate_commands(Vector<Header, 0> &headers, ...)`.
 * Also lost: the opening of the expanded-parameter call before line 837 (original
 * line 836, presumably `IndexRange vert_range = GPU_batch_draw_expanded_parameter_get(`),
 * the two barrier calls (original lines 877 and 880), the shader unbind/flush at
 * line 882, and the `GPU_debug_group_end()` at line 885 — confirm upstream.
 * Finalizes all draw groups on the CPU (prefix sums, batch parameters, expansion
 * ranges), uploads them, then dispatches the command-generation compute shader that
 * fills the indirect command and resource-id buffers from visibility data. */
803 Vector<Undetermined, 0> & /*commands*/,
804 VisibilityBuf &visibility_buf,
805 int visibility_word_per_draw,
806 int view_len,
807 bool use_custom_ids)
808{
809 GPU_debug_group_begin("DrawMultiBuf.bind");
810
811 resource_id_count_ = 0u;
812 for (DrawGroup &group : MutableSpan<DrawGroup>(group_buf_.data(), group_count_)) {
813 /* Compute prefix sum of all instance of previous group. */
814 group.start = resource_id_count_;
815 resource_id_count_ += group.len;
816
817 int batch_vert_len, batch_vert_first, batch_base_index, batch_inst_len;
818 /* Now that GPUBatches are guaranteed to be finished, extract their parameters. */
819 GPU_batch_draw_parameter_get(group.desc.gpu_batch,
820 &batch_vert_len,
821 &batch_vert_first,
822 &batch_base_index,
823 &batch_inst_len);
824
825 group.vertex_len = group.desc.vertex_len == 0 ? batch_vert_len : group.desc.vertex_len;
826 group.vertex_first = group.desc.vertex_first == -1 ? batch_vert_first :
827 group.desc.vertex_first;
828 group.base_index = batch_base_index;
829 /* Instancing attributes are not supported using the new pipeline since we use the base
830 * instance to set the correct resource_id. Workaround is a storage_buf + gl_InstanceID. */
831 BLI_assert(batch_inst_len == 1);
832 UNUSED_VARS_NDEBUG(batch_inst_len);
833
834 if (group.desc.expand_prim_type != GPU_PRIM_NONE) {
835 /* Expanded draw-call. */
837 group.desc.gpu_batch->prim_type,
838 GPUPrimType(group.desc.expand_prim_type),
839 group.vertex_len,
840 group.vertex_first,
841 group.desc.expand_prim_len);
842
843 group.vertex_first = vert_range.start();
844 group.vertex_len = vert_range.size();
845 /* Override base index to -1 as the generated draw-call will not use an index buffer and do
846 * the indirection manually inside the shader. */
847 group.base_index = -1;
848 }
849
850 /* Reset counters to 0 for the GPU. */
851 group.total_counter = group.front_facing_counter = group.back_facing_counter = 0;
852 }
853
854 group_buf_.push_update();
855 prototype_buf_.push_update();
856 /* Allocate enough for the expansion pass. */
857 resource_id_buf_.get_or_resize(resource_id_count_ * view_len * (use_custom_ids ? 2 : 1));
858 /* Two commands per group (inverted and non-inverted scale). */
859 command_buf_.get_or_resize(group_count_ * 2);
860
861 if (prototype_count_ > 0) {
862 GPUShader *shader = DRW_shader_draw_command_generate_get();
863 GPU_shader_bind(shader);
864 GPU_shader_uniform_1i(shader, "prototype_len", prototype_count_);
865 GPU_shader_uniform_1i(shader, "visibility_word_per_draw", visibility_word_per_draw);
866 GPU_shader_uniform_1i(shader, "view_len", view_len);
867 GPU_shader_uniform_1i(shader, "view_shift", log2_ceil_u(view_len));
868 GPU_shader_uniform_1b(shader, "use_custom_ids", use_custom_ids);
869 GPU_storagebuf_bind(group_buf_, GPU_shader_get_ssbo_binding(shader, "group_buf"));
870 GPU_storagebuf_bind(visibility_buf, GPU_shader_get_ssbo_binding(shader, "visibility_buf"));
871 GPU_storagebuf_bind(prototype_buf_, GPU_shader_get_ssbo_binding(shader, "prototype_buf"));
872 GPU_storagebuf_bind(command_buf_, GPU_shader_get_ssbo_binding(shader, "command_buf"));
873 GPU_storagebuf_bind(resource_id_buf_, DRW_RESOURCE_ID_SLOT);
874 GPU_compute_dispatch(shader, divide_ceil_u(prototype_count_, DRW_COMMAND_GROUP_SIZE), 1, 1);
875 /* TODO(@fclem): Investigate moving the barrier in the bind function. */
876 if (GPU_shader_draw_parameters_support() == false) {
878 }
879 else {
881 }
883 }
884
886}
887
/* NOTE(review): signature lost in extraction; from the matching declaration this is
 * presumably `void DrawMultiBuf::bind(RecordingState &state)`. Mirrors
 * DrawCommandBuf::bind: record the buffer for per-batch attachment when GPU draw
 * parameters are unsupported, otherwise bind it as an SSBO. */
889{
890 if (GPU_shader_draw_parameters_support() == false) {
891 state.resource_id_buf = resource_id_buf_;
892 }
893 else {
894 GPU_storagebuf_bind(resource_id_buf_, DRW_RESOURCE_ID_SLOT);
895 }
896}
897
899
900}; // namespace blender::draw::command
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE unsigned int log2_ceil_u(unsigned int x)
MINLINE uint divide_ceil_u(uint a, uint b)
unsigned int uint
#define ARRAY_SIZE(arr)
#define UNUSED_VARS_NDEBUG(...)
blender::gpu::Batch * GPU_batch_procedural_triangle_strips_get()
Definition gpu_batch.cc:548
blender::gpu::Batch * GPU_batch_procedural_lines_get()
Definition gpu_batch.cc:538
void GPU_batch_draw_advanced(blender::gpu::Batch *batch, int vertex_first, int vertex_count, int instance_first, int instance_count)
blender::IndexRange GPU_batch_draw_expanded_parameter_get(GPUPrimType input_prim_type, GPUPrimType output_prim_type, int vertex_count, int vertex_first, int output_primitive_cout)
Definition gpu_batch.cc:358
void GPU_batch_draw_indirect(blender::gpu::Batch *batch, GPUStorageBuf *indirect_buf, intptr_t offset)
void GPU_batch_resource_id_buf_set(blender::gpu::Batch *batch, GPUStorageBuf *resource_id_buf)
blender::gpu::Batch * GPU_batch_procedural_points_get()
Definition gpu_batch.cc:533
void GPU_batch_bind_as_resources(blender::gpu::Batch *batch, GPUShader *shader, const blender::gpu::shader::SpecializationConstants *constants=nullptr)
blender::gpu::Batch * GPU_batch_procedural_triangles_get()
Definition gpu_batch.cc:543
void GPU_batch_set_shader(blender::gpu::Batch *batch, GPUShader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_batch_draw_parameter_get(blender::gpu::Batch *batch, int *r_vertex_count, int *r_vertex_first, int *r_base_index, int *r_instance_count)
bool GPU_shader_draw_parameters_support()
GPUAttachmentState
@ GPU_ATTACHMENT_WRITE
@ GPU_ATTACHMENT_IGNORE
void GPU_compute_dispatch(GPUShader *shader, uint groups_x_len, uint groups_y_len, uint groups_z_len, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_debug_group_end()
Definition gpu_debug.cc:33
void GPU_debug_group_begin(const char *name)
Definition gpu_debug.cc:22
const char * GPU_framebuffer_get_name(GPUFrameBuffer *fb)
GPUFrameBuffer * GPU_framebuffer_active_get()
eGPUFrameBufferBits
@ GPU_DEPTH_BIT
@ GPU_STENCIL_BIT
@ GPU_COLOR_BIT
void GPU_framebuffer_subpass_transition_array(GPUFrameBuffer *fb, const GPUAttachmentState *attachment_states, uint attachment_len)
void GPU_framebuffer_bind(GPUFrameBuffer *fb)
void GPU_framebuffer_multi_clear(GPUFrameBuffer *fb, const float(*clear_colors)[4])
void GPU_framebuffer_clear(GPUFrameBuffer *fb, eGPUFrameBufferBits buffers, const float clear_col[4], float clear_depth, unsigned int clear_stencil)
void GPU_indexbuf_bind_as_ssbo(blender::gpu::IndexBuf *elem, int binding)
GPUPrimType
@ GPU_PRIM_NONE
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_TRI_STRIP
@ GPU_PRIM_TRIS
const char * GPU_shader_get_name(GPUShader *shader)
void GPU_shader_uniform_1i(GPUShader *sh, const char *name, int value)
void GPU_shader_uniform_int_ex(GPUShader *shader, int location, int length, int array_size, const int *value)
void GPU_shader_bind(GPUShader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
int GPU_shader_get_ssbo_binding(GPUShader *shader, const char *name)
void GPU_shader_uniform_float_ex(GPUShader *shader, int location, int length, int array_size, const float *value)
void GPU_shader_uniform_1b(GPUShader *sh, const char *name, bool value)
const blender::gpu::shader::SpecializationConstants & GPU_shader_get_default_constant_state(GPUShader *sh)
void GPU_memory_barrier(eGPUBarrier barrier)
Definition gpu_state.cc:385
void GPU_program_point_size(bool enable)
Definition gpu_state.cc:180
void GPU_line_width(float width)
Definition gpu_state.cc:166
void GPU_line_smooth(bool enable)
Definition gpu_state.cc:78
void GPU_stencil_write_mask_set(uint write_mask)
Definition gpu_state.cc:210
void GPU_depth_range(float near, float far)
Definition gpu_state.cc:159
void GPU_stencil_reference_set(uint reference)
Definition gpu_state.cc:205
@ GPU_BARRIER_SHADER_STORAGE
Definition GPU_state.hh:48
@ GPU_BARRIER_VERTEX_ATTRIB_ARRAY
Definition GPU_state.hh:50
void GPU_stencil_compare_mask_set(uint compare_mask)
Definition gpu_state.cc:215
void GPU_point_size(float size)
Definition gpu_state.cc:172
void GPU_state_set(eGPUWriteMask write_mask, eGPUBlend blend, eGPUFaceCullTest culling_test, eGPUDepthTest depth_test, eGPUStencilTest stencil_test, eGPUStencilOp stencil_op, eGPUProvokingVertex provoking_vert)
Definition gpu_state.cc:129
void GPU_clip_control_unit_range(bool enable)
Definition gpu_state.cc:148
void GPU_clip_distances(int distances_enabled)
Definition gpu_state.cc:124
void GPU_shadow_offset(bool enable)
Definition gpu_state.cc:119
void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot)
void GPU_storagebuf_sync_as_indirect_buffer(GPUStorageBuf *ssbo)
void GPU_storagebuf_debug_unbind_all()
void GPU_texture_bind_ex(GPUTexture *texture, GPUSamplerState state, int unit)
void GPU_texture_image_unbind_all()
void GPU_texture_image_bind(GPUTexture *texture, int unit)
void GPU_texture_unbind_all()
void GPU_uniformbuf_debug_unbind_all()
void GPU_uniformbuf_bind_as_ssbo(GPUUniformBuf *ubo, int slot)
void GPU_uniformbuf_bind(GPUUniformBuf *ubo, int slot)
void GPU_vertbuf_bind_as_ssbo(blender::gpu::VertBuf *verts, int binding)
void GPU_vertbuf_bind_as_texture(blender::gpu::VertBuf *verts, int binding)
long long int int64_t
constexpr int64_t size() const
constexpr bool is_empty() const
constexpr int64_t start() const
constexpr MutableSpan slice_safe(const int64_t start, const int64_t size) const
Definition BLI_span.hh:590
constexpr T * end() const
Definition BLI_span.hh:548
constexpr T * begin() const
Definition BLI_span.hh:544
void generate_commands(Vector< Header, 0 > &headers, Vector< Undetermined, 0 > &commands, SubPassVector &sub_passes)
void bind(RecordingState &state)
void generate_commands(Vector< Header, 0 > &headers, Vector< Undetermined, 0 > &commands, VisibilityBuf &visibility_buf, int visibility_word_per_draw, int view_len, bool use_custom_ids)
void bind(RecordingState &state)
#define DRW_RESOURCE_ID_SLOT
#define DRW_COMMAND_GROUP_SIZE
GPUShader * DRW_shader_draw_command_generate_get()
DRWState
Definition draw_state.hh:25
@ DRW_STATE_IN_FRONT_SELECT
Definition draw_state.hh:69
@ DRW_STATE_PROGRAM_POINT_SIZE
Definition draw_state.hh:74
@ DRW_STATE_CLIP_CONTROL_UNIT_RANGE
Definition draw_state.hh:68
@ DRW_STATE_SHADOW_OFFSET
Definition draw_state.hh:70
void GPU_compute_dispatch_indirect(GPUShader *shader, GPUStorageBuf *indirect_buf_, const blender::gpu::shader::SpecializationConstants *constants_state)
BLI_INLINE float fb(float length, float L)
static ulong state[N]
static gpu::Batch * procedural_batch_get(GPUPrimType primitive)
static eGPUDepthTest to_depth_test(DRWState state)
static eGPUBlend to_blend(DRWState state)
static eGPUProvokingVertex to_provoking_vertex(DRWState state)
static eGPUStencilOp to_stencil_op(DRWState state)
static eGPUStencilTest to_stencil_test(DRWState state)
static eGPUWriteMask to_write_mask(DRWState state)
StorageArrayBuffer< uint, 4, true > VisibilityBuf
Definition draw_view.hh:35
static eGPUFaceCullTest to_face_cull_test(DRWState state)
bool assign_if_different(T &old_value, T new_value)
MatBase< float, 4, 4 > float4x4
VecBase< float, 4 > float4
VecBase< int32_t, 3 > int3
std::string serialize() const
void execute(RecordingState &state) const
void execute(RecordingState &state) const
struct blender::draw::command::DrawGroup::@251306170140361114007106001100121365211105052341 desc
void execute(RecordingState &state) const
std::string serialize(const std::string &line_prefix) const
void execute(RecordingState &state) const
void execute(RecordingState &state) const
std::string serialize() const
void execute(RecordingState &state) const
enum blender::draw::command::PushConstant::Type type
enum blender::draw::command::ResourceBind::Type type
void execute(RecordingState &state) const
void execute(RecordingState &state) const
enum blender::draw::command::SpecializeConstant::Type type
void execute(RecordingState &state) const
static void set(DRWState state=DRW_STATE_DEFAULT)
i
Definition text_draw.cc:230