Blender  V2.93
draw_manager_exec.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2016, Blender Foundation.
17  */
18 
23 #include "draw_manager.h"
24 
25 #include "BLI_alloca.h"
26 #include "BLI_math.h"
27 #include "BLI_math_bits.h"
28 #include "BLI_memblock.h"
29 
30 #include "BKE_global.h"
31 
32 #include "GPU_platform.h"
33 #include "GPU_shader.h"
34 #include "GPU_state.h"
35 
36 #ifdef USE_GPU_SELECT
37 # include "GPU_select.h"
38 #endif
39 
41 {
42 #ifdef USE_GPU_SELECT
44  DST.select_id = id;
45 #endif
46 }
47 
48 #define DEBUG_UBO_BINDING
49 
50 typedef struct DRWCommandsState {
54  int base_inst;
56  bool neg_scale;
57  /* Resource location. */
64  /* Legacy matrix support. */
65  int obmat_loc;
66  int obinv_loc;
67  /* Uniform Attributes. */
69  /* Selection ID state. */
72  /* Drawing State */
76 
77 /* -------------------------------------------------------------------- */
82 {
83  /* Mask locked state. */
85 
86  if (DST.state == state) {
87  return;
88  }
89 
90  eGPUWriteMask write_mask = 0;
91  eGPUBlend blend = 0;
92  eGPUFaceCullTest culling_test = 0;
93  eGPUDepthTest depth_test = 0;
94  eGPUStencilTest stencil_test = 0;
95  eGPUStencilOp stencil_op = 0;
96  eGPUProvokingVertex provoking_vert = 0;
97 
99  write_mask |= GPU_WRITE_DEPTH;
100  }
102  write_mask |= GPU_WRITE_COLOR;
103  }
105  write_mask |= GPU_WRITE_STENCIL;
106  }
107 
109  case DRW_STATE_CULL_BACK:
110  culling_test = GPU_CULL_BACK;
111  break;
113  culling_test = GPU_CULL_FRONT;
114  break;
115  default:
116  culling_test = GPU_CULL_NONE;
117  break;
118  }
119 
122  depth_test = GPU_DEPTH_LESS;
123  break;
125  depth_test = GPU_DEPTH_LESS_EQUAL;
126  break;
128  depth_test = GPU_DEPTH_EQUAL;
129  break;
131  depth_test = GPU_DEPTH_GREATER;
132  break;
134  depth_test = GPU_DEPTH_GREATER_EQUAL;
135  break;
137  depth_test = GPU_DEPTH_ALWAYS;
138  break;
139  default:
140  depth_test = GPU_DEPTH_NONE;
141  break;
142  }
143 
146  stencil_op = GPU_STENCIL_OP_REPLACE;
148  break;
150  stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_PASS;
152  break;
154  stencil_op = GPU_STENCIL_OP_COUNT_DEPTH_FAIL;
156  break;
157  default:
158  stencil_op = GPU_STENCIL_OP_NONE;
160  break;
161  }
162 
165  stencil_test = GPU_STENCIL_ALWAYS;
166  break;
168  stencil_test = GPU_STENCIL_EQUAL;
169  break;
171  stencil_test = GPU_STENCIL_NEQUAL;
172  break;
173  default:
174  stencil_test = GPU_STENCIL_NONE;
175  break;
176  }
177 
178  switch (state & DRW_STATE_BLEND_ENABLED) {
179  case DRW_STATE_BLEND_ADD:
181  break;
184  break;
187  break;
190  break;
193  break;
194  case DRW_STATE_BLEND_OIT:
196  break;
197  case DRW_STATE_BLEND_MUL:
199  break;
200  case DRW_STATE_BLEND_SUB:
202  break;
205  break;
208  break;
211  break;
212  default:
214  break;
215  }
216 
218  write_mask, blend, culling_test, depth_test, stencil_test, stencil_op, provoking_vert);
219 
221  GPU_shadow_offset(true);
222  }
223  else {
224  GPU_shadow_offset(false);
225  }
226 
227  /* TODO this should be part of shader state. */
230  }
231  else {
233  }
234 
236  /* XXX `GPU_depth_range` is not a perfect solution
237  * since very distant geometries can still be occluded.
238  * Also the depth test precision of these geometries is impaired.
239  * However, it solves the selection for the vast majority of cases. */
240  GPU_depth_range(0.0f, 0.01f);
241  }
242  else {
243  GPU_depth_range(0.0f, 1.0f);
244  }
245 
248  }
249  else {
250  GPU_program_point_size(false);
251  }
252 
255  }
256  else {
258  }
259 
260  DST.state = state;
261 }
262 
263 static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
264 {
265  /* Reminders:
266  * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
267  * stencil_value being the value stored in the stencil buffer.
268  * - (write-mask & reference) is what gets written if the test condition is fulfilled.
269  */
270  GPU_stencil_write_mask_set(write_mask);
271  GPU_stencil_reference_set(reference);
272  GPU_stencil_compare_mask_set(compare_mask);
273 }
274 
275 /* Reset state to not interfere with other UI draw-call. */
277 {
278  DST.state = ~state;
280 }
281 
282 static void drw_state_validate(void)
283 {
284  /* Cannot write to stencil buffer without stencil test. */
287  }
288  /* Cannot write to depth buffer without depth test. */
289  if ((DST.state & DRW_STATE_WRITE_DEPTH)) {
291  }
292 }
293 
301 {
302  DST.state_lock = state;
303 
304  /* We must get the current state to avoid overriding it. */
305  /* Not complete, but that just what we need for now. */
308  }
311 
312  switch (GPU_depth_test_get()) {
313  case GPU_DEPTH_ALWAYS:
315  break;
316  case GPU_DEPTH_LESS:
318  break;
321  break;
322  case GPU_DEPTH_EQUAL:
324  break;
325  case GPU_DEPTH_GREATER:
327  break;
330  break;
331  default:
332  break;
333  }
334  }
335 }
336 
337 void DRW_state_reset(void)
338 {
340 
343 
344  /* Should stay constant during the whole rendering. */
345  GPU_point_size(5);
346  GPU_line_smooth(false);
347  /* Bypass #U.pixelsize factor by using a factor of 0.0f. Will be clamped to 1.0f. */
348  GPU_line_width(0.0f);
349 }
350 
353 /* -------------------------------------------------------------------- */
/* Return true when the resource behind `handle` is flagged as culled for `view`.
 * NOTE(review): the declaration of `culling` (looked up from `handle`, original
 * line 359) was lost in extraction — confirm against the full source file. */
static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
{
  /* Non-zero intersection of the cull mask with this view's bit(s) means culled. */
  return (culling->mask & view->culling_mask) != 0;
}
362 
363 /* Set active view for rendering. */
365 {
367 }
368 
369 /* Return True if the given BoundSphere intersect the current view frustum */
370 static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere,
371  const float (*frustum_planes)[4],
372  const BoundSphere *bsphere)
373 {
374  /* Bypass test if radius is negative. */
375  if (bsphere->radius < 0.0f) {
376  return true;
377  }
378 
379  /* Do a rough test first: Sphere VS Sphere intersect. */
380  float center_dist_sq = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
381  float radius_sum = bsphere->radius + frustum_bsphere->radius;
382  if (center_dist_sq > square_f(radius_sum)) {
383  return false;
384  }
385  /* TODO we could test against the inscribed sphere of the frustum to early out positively. */
386 
387  /* Test against the 6 frustum planes. */
388  /* TODO order planes with sides first then far then near clip. Should be better culling
389  * heuristic when sculpting. */
390  for (int p = 0; p < 6; p++) {
391  float dist = plane_point_side_v3(frustum_planes[p], bsphere->center);
392  if (dist < -bsphere->radius) {
393  return false;
394  }
395  }
396  return true;
397 }
398 
399 static bool draw_culling_box_test(const float (*frustum_planes)[4], const BoundBox *bbox)
400 {
401  /* 6 view frustum planes */
402  for (int p = 0; p < 6; p++) {
403  /* 8 box vertices. */
404  for (int v = 0; v < 8; v++) {
405  float dist = plane_point_side_v3(frustum_planes[p], bbox->vec[v]);
406  if (dist > 0.0f) {
407  /* At least one point in front of this plane.
408  * Go to next plane. */
409  break;
410  }
411  if (v == 7) {
412  /* 8 points behind this plane. */
413  return false;
414  }
415  }
416  }
417  return true;
418 }
419 
420 static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
421 {
422  /* Test against the 8 frustum corners. */
423  for (int c = 0; c < 8; c++) {
424  float dist = plane_point_side_v3(plane, corners->vec[c]);
425  if (dist < 0.0f) {
426  return true;
427  }
428  }
429  return false;
430 }
431 
432 /* Return True if the given BoundSphere intersect the current view frustum.
433  * bsphere must be in world space. */
434 bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
435 {
436  view = view ? view : DST.view_default;
437  return draw_culling_sphere_test(&view->frustum_bsphere, view->frustum_planes, bsphere);
438 }
439 
440 /* Return True if the given BoundBox intersect the current view frustum.
441  * bbox must be in world space. */
442 bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
443 {
444  view = view ? view : DST.view_default;
445  return draw_culling_box_test(view->frustum_planes, bbox);
446 }
447 
448 /* Return True if the view frustum is inside or intersect the given plane.
449  * plane must be in world space. */
450 bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
451 {
452  view = view ? view : DST.view_default;
453  return draw_culling_plane_test(&view->frustum_corners, plane);
454 }
455 
456 /* Return True if the given box intersect the current view frustum.
457  * This function will have to be replaced when world space bb per objects is implemented. */
458 bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
459 {
460  view = view ? view : DST.view_default;
461  float tobmat[4][4];
462  transpose_m4_m4(tobmat, obmat);
463  for (int i = 6; i--;) {
464  float frustum_plane_local[4], bb_near[3], bb_far[3];
465  mul_v4_m4v4(frustum_plane_local, tobmat, view->frustum_planes[i]);
466  aabb_get_near_far_from_plane(frustum_plane_local, min, max, bb_near, bb_far);
467 
468  if (plane_point_side_v3(frustum_plane_local, bb_far) < 0.0f) {
469  return false;
470  }
471  }
472 
473  return true;
474 }
475 
477 {
478  view = view ? view : DST.view_default;
479  *corners = view->frustum_corners;
480 }
481 
482 void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
483 {
484  view = view ? view : DST.view_default;
485  memcpy(planes, view->frustum_planes, sizeof(float[6][4]));
486 }
487 
489 {
490  view = view->parent ? view->parent : view;
491 
492  /* TODO(fclem): multi-thread this. */
493  /* TODO(fclem): compute all dirty views at once. */
494  if (!view->is_dirty) {
495  return;
496  }
497 
498  BLI_memblock_iter iter;
500  DRWCullingState *cull;
501  while ((cull = BLI_memblock_iterstep(&iter))) {
502  if (cull->bsphere.radius < 0.0) {
503  cull->mask = 0;
504  }
505  else {
506  bool culled = !draw_culling_sphere_test(
507  &view->frustum_bsphere, view->frustum_planes, &cull->bsphere);
508 
509 #ifdef DRW_DEBUG_CULLING
510  if (G.debug_value != 0) {
511  if (culled) {
513  cull->bsphere.center, cull->bsphere.radius, (const float[4]){1, 0, 0, 1});
514  }
515  else {
517  cull->bsphere.center, cull->bsphere.radius, (const float[4]){0, 1, 0, 1});
518  }
519  }
520 #endif
521 
522  if (view->visibility_fn) {
523  culled = !view->visibility_fn(!culled, cull->user_data);
524  }
525 
526  SET_FLAG_FROM_TEST(cull->mask, culled, view->culling_mask);
527  }
528  }
529 
530  view->is_dirty = false;
531 }
532 
535 /* -------------------------------------------------------------------- */
540  DRWResourceHandle *handle,
541  float obmat_loc,
542  float obinv_loc)
543 {
544  /* Still supported for compatibility with gpu_shader_* but should be forbidden. */
546  if (obmat_loc != -1) {
547  GPU_shader_uniform_vector(shgroup->shader, obmat_loc, 16, 1, (float *)ob_mats->model);
548  }
549  if (obinv_loc != -1) {
550  GPU_shader_uniform_vector(shgroup->shader, obinv_loc, 16, 1, (float *)ob_mats->modelinverse);
551  }
552 }
553 
555 {
556  DST.batch = geom;
557 
558  GPU_batch_set_shader(geom, shgroup->shader);
559 }
560 
562  GPUBatch *geom,
563  int vert_first,
564  int vert_count,
565  int inst_first,
566  int inst_count,
567  int baseinst_loc)
568 {
569  /* inst_count can be -1. */
570  inst_count = max_ii(0, inst_count);
571 
572  if (baseinst_loc != -1) {
573  /* Fallback when ARB_shader_draw_parameters is not supported. */
574  GPU_shader_uniform_vector_int(shgroup->shader, baseinst_loc, 1, 1, (int *)&inst_first);
575  /* Avoids VAO reconfiguration on older hardware. (see GPU_batch_draw_advanced) */
576  inst_first = 0;
577  }
578 
579  /* bind vertex array */
580  if (DST.batch != geom) {
581  draw_geometry_bind(shgroup, geom);
582  }
583 
584  GPU_batch_draw_advanced(geom, vert_first, vert_count, inst_first, inst_count);
585 }
586 
588 {
589  if (state->inst_count == 0) {
590  return;
591  }
592  if (state->baseinst_loc == -1) {
593  /* bind vertex array */
594  if (DST.batch != state->batch) {
596  draw_geometry_bind(shgroup, state->batch);
597  }
598  GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
599  }
600  /* Fallback when unsupported */
601  else {
603  shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
604  }
605 }
606 
609  bool *use_tfeedback)
610 {
611  for (DRWUniformChunk *unichunk = shgroup->uniforms; unichunk; unichunk = unichunk->next) {
612  DRWUniform *uni = unichunk->uniforms;
613  for (int i = 0; i < unichunk->uniform_used; i++, uni++) {
614  switch (uni->type) {
617  shgroup->shader, uni->location, uni->length, uni->arraysize, uni->ivalue);
618  break;
619  case DRW_UNIFORM_INT:
621  shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
622  break;
625  shgroup->shader, uni->location, uni->length, uni->arraysize, uni->fvalue);
626  break;
627  case DRW_UNIFORM_FLOAT:
629  shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
630  break;
631  case DRW_UNIFORM_TEXTURE:
632  GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location, false);
633  break;
635  GPU_texture_bind_ex(*uni->texture_ref, uni->sampler_state, uni->location, false);
636  break;
637  case DRW_UNIFORM_IMAGE:
639  break;
642  break;
643  case DRW_UNIFORM_BLOCK:
644  GPU_uniformbuf_bind(uni->block, uni->location);
645  break;
648  break;
650  state->obmats_loc = uni->location;
652  break;
654  state->obinfos_loc = uni->location;
656  break;
658  state->obattrs_loc = uni->location;
660  uni->uniform_attrs);
661  DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
662  break;
664  state->chunkid_loc = uni->location;
665  GPU_shader_uniform_int(shgroup->shader, uni->location, 0);
666  break;
668  state->resourceid_loc = uni->location;
669  break;
671  BLI_assert(uni->pvalue && (*use_tfeedback == false));
672  *use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
673  ((GPUVertBuf *)uni->pvalue));
674  break;
675  /* Legacy/Fallback support. */
677  state->baseinst_loc = uni->location;
678  break;
680  state->obmat_loc = uni->location;
681  break;
683  state->obinv_loc = uni->location;
684  break;
685  }
686  }
687  }
688 }
689 
692  GPUBatch *batch,
693  const DRWResourceHandle *handle)
694 {
695  const bool is_instancing = (batch->inst[0] != NULL);
696  int start = 0;
697  int count = 1;
698  int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
700  /* Hack : get "vbo" data without actually drawing. */
701  int *select_id = (void *)GPU_vertbuf_get_data(state->select_buf);
702 
703  /* Batching */
704  if (!is_instancing) {
705  /* FIXME: Meh a bit nasty. */
706  if (batch->prim_type == GPU_PRIM_TRIS) {
707  count = 3;
708  }
709  else if (batch->prim_type == GPU_PRIM_LINES) {
710  count = 2;
711  }
712  }
713 
714  while (start < tot) {
715  GPU_select_load_id(select_id[start]);
716  if (is_instancing) {
717  draw_geometry_execute(shgroup, batch, 0, 0, start, count, state->baseinst_loc);
718  }
719  else {
721  shgroup, batch, start, count, DRW_handle_id_get(handle), 0, state->baseinst_loc);
722  }
723  start += count;
724  }
725 }
726 
727 typedef struct DRWCommandIterator {
731 
733 {
734  iter->curr_chunk = shgroup->cmd.first;
735  iter->cmd_index = 0;
736 }
737 
739 {
740  if (iter->curr_chunk) {
741  if (iter->cmd_index == iter->curr_chunk->command_len) {
742  iter->curr_chunk = iter->curr_chunk->next;
743  iter->cmd_index = 0;
744  }
745  if (iter->curr_chunk) {
746  *cmd_type = command_type_get(iter->curr_chunk->command_type, iter->cmd_index);
747  if (iter->cmd_index < iter->curr_chunk->command_used) {
748  return iter->curr_chunk->commands + iter->cmd_index++;
749  }
750  }
751  }
752  return NULL;
753 }
754 
756 {
757  /* Front face is not a resource but it is inside the resource handle. */
758  bool neg_scale = DRW_handle_negative_scale_get(handle);
759  if (neg_scale != state->neg_scale) {
760  state->neg_scale = neg_scale;
762  }
763 
764  int chunk = DRW_handle_chunk_get(handle);
765  if (state->resource_chunk != chunk) {
766  if (state->chunkid_loc != -1) {
767  GPU_shader_uniform_int(DST.shader, state->chunkid_loc, chunk);
768  }
769  if (state->obmats_loc != -1) {
771  GPU_uniformbuf_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
772  }
773  if (state->obinfos_loc != -1) {
775  GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
776  }
777  if (state->obattrs_loc != -1) {
778  DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
779  DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
780  }
781  state->resource_chunk = chunk;
782  }
783 
784  if (state->resourceid_loc != -1) {
785  int id = DRW_handle_id_get(handle);
786  if (state->resource_id != id) {
787  GPU_shader_uniform_int(DST.shader, state->resourceid_loc, id);
788  state->resource_id = id;
789  }
790  }
791 }
792 
794 {
795  draw_indirect_call(shgroup, state);
797 
798  state->batch = NULL;
799  state->inst_count = 0;
800  state->base_inst = -1;
801 }
802 
805  GPUBatch *batch,
806  DRWResourceHandle handle,
807  int vert_first,
808  int vert_count,
809  int inst_first,
810  int inst_count,
811  bool do_base_instance)
812 {
814 
815  draw_call_resource_bind(state, &handle);
816 
817  /* TODO This is Legacy. Need to be removed. */
818  if (state->obmats_loc == -1 && (state->obmat_loc != -1 || state->obinv_loc != -1)) {
819  draw_legacy_matrix_update(shgroup, &handle, state->obmat_loc, state->obinv_loc);
820  }
821 
822  if (G.f & G_FLAG_PICKSEL) {
823  if (state->select_buf != NULL) {
824  draw_select_buffer(shgroup, state, batch, &handle);
825  return;
826  }
827 
828  GPU_select_load_id(state->select_id);
829  }
830 
831  draw_geometry_execute(shgroup,
832  batch,
833  vert_first,
834  vert_count,
835  do_base_instance ? DRW_handle_id_get(&handle) : inst_first,
836  inst_count,
837  state->baseinst_loc);
838 }
839 
841 {
842  state->neg_scale = false;
843  state->resource_chunk = 0;
844  state->resource_id = -1;
845  state->base_inst = 0;
846  state->inst_count = 0;
847  state->batch = NULL;
848 
849  state->select_id = -1;
850  state->select_buf = NULL;
851 }
852 
853 /* NOTE: Does not support batches with instancing VBOs. */
856  DRWCommandDraw *call)
857 {
858  /* If any condition requires to interrupt the merging. */
859  bool neg_scale = DRW_handle_negative_scale_get(&call->handle);
860  int chunk = DRW_handle_chunk_get(&call->handle);
861  int id = DRW_handle_id_get(&call->handle);
862  if ((state->neg_scale != neg_scale) || /* Need to change state. */
863  (state->resource_chunk != chunk) || /* Need to change UBOs. */
864  (state->batch != call->batch) /* Need to change VAO. */
865  ) {
867 
868  state->batch = call->batch;
869  state->inst_count = 1;
870  state->base_inst = id;
871 
873  }
874  /* Is the id consecutive? */
875  else if (id != state->base_inst + state->inst_count) {
876  /* We need to add a draw command for the pending instances. */
877  draw_indirect_call(shgroup, state);
878  state->inst_count = 1;
879  state->base_inst = id;
880  }
881  /* We avoid a drawcall by merging with the precedent
882  * drawcall using instancing. */
883  else {
884  state->inst_count++;
885  }
886 }
887 
888 /* Flush remaining pending drawcalls. */
890 {
892 
893  /* Reset state */
894  if (state->neg_scale) {
896  }
897  if (state->obmats_loc != -1) {
899  }
900  if (state->obinfos_loc != -1) {
902  }
903  if (state->obattrs_loc != -1) {
904  DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
905  }
906 }
907 
908 static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
909 {
910  BLI_assert(shgroup->shader);
911 
913  .obmats_loc = -1,
914  .obinfos_loc = -1,
915  .obattrs_loc = -1,
916  .baseinst_loc = -1,
917  .chunkid_loc = -1,
918  .resourceid_loc = -1,
919  .obmat_loc = -1,
920  .obinv_loc = -1,
921  .obattrs_ubo = NULL,
922  .drw_state_enabled = 0,
923  .drw_state_disabled = 0,
924  };
925 
926  const bool shader_changed = (DST.shader != shgroup->shader);
927  bool use_tfeedback = false;
928 
929  if (shader_changed) {
930  if (DST.shader) {
932 
933  /* Unbinding can be costly. Skip in normal condition. */
934  if (G.debug & G_DEBUG_GPU) {
937  }
938  }
939  GPU_shader_bind(shgroup->shader);
940  DST.shader = shgroup->shader;
941  DST.batch = NULL;
942  }
943 
944  draw_update_uniforms(shgroup, &state, &use_tfeedback);
945 
946  drw_state_set(pass_state);
947 
948  /* Rendering Calls */
949  {
950  DRWCommandIterator iter;
951  DRWCommand *cmd;
952  eDRWCommandType cmd_type;
953 
954  draw_command_iter_begin(&iter, shgroup);
955 
957 
958  while ((cmd = draw_command_iter_step(&iter, &cmd_type))) {
959 
960  switch (cmd_type) {
961  case DRW_CMD_DRWSTATE:
962  case DRW_CMD_STENCIL:
963  draw_call_batching_flush(shgroup, &state);
964  break;
965  case DRW_CMD_DRAW:
969  continue;
970  }
971  break;
972  default:
973  break;
974  }
975 
976  switch (cmd_type) {
977  case DRW_CMD_CLEAR:
979  cmd->clear.clear_channels,
980  (float[4]){cmd->clear.r / 255.0f,
981  cmd->clear.g / 255.0f,
982  cmd->clear.b / 255.0f,
983  cmd->clear.a / 255.0f},
984  cmd->clear.depth,
985  cmd->clear.stencil);
986  break;
987  case DRW_CMD_DRWSTATE:
988  state.drw_state_enabled |= cmd->state.enable;
989  state.drw_state_disabled |= cmd->state.disable;
990  drw_state_set((pass_state & ~state.drw_state_disabled) | state.drw_state_enabled);
991  break;
992  case DRW_CMD_STENCIL:
994  break;
995  case DRW_CMD_SELECTID:
996  state.select_id = cmd->select_id.select_id;
997  state.select_buf = cmd->select_id.select_buf;
998  break;
999  case DRW_CMD_DRAW:
1000  if (!USE_BATCHING || state.obmats_loc == -1 || (G.f & G_FLAG_PICKSEL) ||
1001  cmd->draw.batch->inst[0]) {
1003  shgroup, &state, cmd->draw.batch, cmd->draw.handle, 0, 0, 0, 0, true);
1004  }
1005  else {
1006  draw_call_batching_do(shgroup, &state, &cmd->draw);
1007  }
1008  break;
1010  draw_call_single_do(shgroup,
1011  &state,
1012  cmd->procedural.batch,
1013  cmd->procedural.handle,
1014  0,
1015  cmd->procedural.vert_count,
1016  0,
1017  1,
1018  true);
1019  break;
1020  case DRW_CMD_DRAW_INSTANCE:
1021  draw_call_single_do(shgroup,
1022  &state,
1023  cmd->instance.batch,
1024  cmd->instance.handle,
1025  0,
1026  0,
1027  0,
1028  cmd->instance.inst_count,
1029  cmd->instance.use_attrs == 0);
1030  break;
1031  case DRW_CMD_DRAW_RANGE:
1032  draw_call_single_do(shgroup,
1033  &state,
1034  cmd->range.batch,
1035  cmd->range.handle,
1036  cmd->range.vert_first,
1037  cmd->range.vert_count,
1038  0,
1039  1,
1040  true);
1041  break;
1043  draw_call_single_do(shgroup,
1044  &state,
1045  cmd->instance_range.batch,
1046  cmd->instance_range.handle,
1047  0,
1048  0,
1051  false);
1052  break;
1053  }
1054  }
1055 
1056  draw_call_batching_finish(shgroup, &state);
1057  }
1058 
1059  if (use_tfeedback) {
1061  }
1062 }
1063 
1064 static void drw_update_view(void)
1065 {
1066  /* TODO(fclem): update a big UBO and only bind ranges here. */
1068 
1069  /* TODO get rid of this. */
1071 
1073 }
1074 
1075 static void drw_draw_pass_ex(DRWPass *pass,
1076  DRWShadingGroup *start_group,
1077  DRWShadingGroup *end_group)
1078 {
1079  if (pass->original) {
1080  start_group = pass->original->shgroups.first;
1081  end_group = pass->original->shgroups.last;
1082  }
1083 
1084  if (start_group == NULL) {
1085  return;
1086  }
1087 
1088  DST.shader = NULL;
1089 
1091  "DRW_render_instance_buffer_finish had not been called before drawing");
1092 
1094  drw_update_view();
1095  DST.view_active->is_dirty = false;
1097  }
1098 
1099  /* GPU_framebuffer_clear calls can change the state outside the DRW module.
1100  * Force reset the affected states to avoid problems later. */
1102 
1103  drw_state_set(pass->state);
1105 
1106  if (DST.view_active->is_inverted) {
1107  GPU_front_facing(true);
1108  }
1109 
1110  DRW_stats_query_start(pass->name);
1111 
1112  for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
1113  draw_shgroup(shgroup, pass->state);
1114  /* break if upper limit */
1115  if (shgroup == end_group) {
1116  break;
1117  }
1118  }
1119 
1120  if (DST.shader) {
1122  DST.shader = NULL;
1123  }
1124 
1125  if (DST.batch) {
1126  DST.batch = NULL;
1127  }
1128 
1129  /* Fix T67342 for some reason. AMD Pro driver bug. */
1130  if ((DST.state & DRW_STATE_BLEND_CUSTOM) != 0 &&
1133  }
1134 
1135  /* HACK: Rasterized discard can affect clear commands which are not
1136  * part of a DRWPass (as of now). So disable rasterized discard here
1137  * if it has been enabled. */
1138  if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
1140  }
1141 
1142  /* Reset default. */
1143  if (DST.view_active->is_inverted) {
1144  GPU_front_facing(false);
1145  }
1146 
1148 }
1149 
1151 {
1152  for (; pass; pass = pass->next) {
1153  drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
1154  }
1155 }
1156 
/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes.
 * Delegates to the shared pass-drawing loop, which iterates the pass's shading
 * groups and stops after `end_group` (inclusive). */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
  drw_draw_pass_ex(pass, start_group, end_group);
}
1162 
@ G_DEBUG_GPU
Definition: BKE_global.h:151
@ G_FLAG_PICKSEL
Definition: BKE_global.h:111
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define BLI_INLINE
MINLINE int max_ii(int a, int b)
MINLINE float square_f(float a)
MINLINE float plane_point_side_v3(const float plane[4], const float co[3])
void aabb_get_near_far_from_plane(const float plane_no[3], const float bbmin[3], const float bbmax[3], float bb_near[3], float bb_afar[3])
Definition: math_geom.c:660
void mul_v4_m4v4(float r[4], const float M[4][4], const float v[4])
Definition: math_matrix.c:854
void transpose_m4_m4(float R[4][4], const float M[4][4])
Definition: math_matrix.c:1384
MINLINE float len_squared_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
void BLI_memblock_iternew(BLI_memblock *mblk, BLI_memblock_iter *iter) ATTR_NONNULL()
Definition: BLI_memblock.c:163
void * BLI_memblock_iterstep(BLI_memblock_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
Definition: BLI_memblock.c:175
unsigned int uint
Definition: BLI_sys_types.h:83
#define SET_FLAG_FROM_TEST(value, test, flag)
#define DRW_STATE_WRITE_STENCIL_ENABLED
Definition: DRW_render.h:374
DRWState
Definition: DRW_render.h:312
@ DRW_STATE_STENCIL_EQUAL
Definition: DRW_render.h:332
@ DRW_STATE_CLIP_PLANES
Definition: DRW_render.h:354
@ DRW_STATE_BLEND_ALPHA
Definition: DRW_render.h:340
@ DRW_STATE_BLEND_ADD
Definition: DRW_render.h:336
@ DRW_STATE_BLEND_BACKGROUND
Definition: DRW_render.h:343
@ DRW_STATE_CULL_FRONT
Definition: DRW_render.h:329
@ DRW_STATE_STENCIL_ALWAYS
Definition: DRW_render.h:331
@ DRW_STATE_DEPTH_LESS
Definition: DRW_render.h:322
@ DRW_STATE_IN_FRONT_SELECT
Definition: DRW_render.h:352
@ DRW_STATE_BLEND_SUB
Definition: DRW_render.h:346
@ DRW_STATE_DEPTH_GREATER_EQUAL
Definition: DRW_render.h:326
@ DRW_STATE_WRITE_STENCIL_SHADOW_FAIL
Definition: DRW_render.h:319
@ DRW_STATE_DEPTH_EQUAL
Definition: DRW_render.h:324
@ DRW_STATE_PROGRAM_POINT_SIZE
Definition: DRW_render.h:357
@ DRW_STATE_WRITE_DEPTH
Definition: DRW_render.h:314
@ DRW_STATE_BLEND_OIT
Definition: DRW_render.h:344
@ DRW_STATE_LOGIC_INVERT
Definition: DRW_render.h:349
@ DRW_STATE_SHADOW_OFFSET
Definition: DRW_render.h:353
@ DRW_STATE_BLEND_ADD_FULL
Definition: DRW_render.h:338
@ DRW_STATE_WRITE_COLOR
Definition: DRW_render.h:315
@ DRW_STATE_BLEND_ALPHA_UNDER_PREMUL
Definition: DRW_render.h:350
@ DRW_STATE_DEPTH_LESS_EQUAL
Definition: DRW_render.h:323
@ DRW_STATE_WRITE_STENCIL_SHADOW_PASS
Definition: DRW_render.h:318
@ DRW_STATE_CULL_BACK
Definition: DRW_render.h:328
@ DRW_STATE_FIRST_VERTEX_CONVENTION
Definition: DRW_render.h:355
@ DRW_STATE_STENCIL_NEQUAL
Definition: DRW_render.h:333
@ DRW_STATE_DEPTH_ALWAYS
Definition: DRW_render.h:321
@ DRW_STATE_BLEND_CUSTOM
Definition: DRW_render.h:348
@ DRW_STATE_BLEND_ALPHA_PREMUL
Definition: DRW_render.h:342
@ DRW_STATE_DEPTH_GREATER
Definition: DRW_render.h:325
@ DRW_STATE_BLEND_MUL
Definition: DRW_render.h:345
@ DRW_STATE_WRITE_STENCIL
Definition: DRW_render.h:317
#define DRW_STATE_RASTERIZER_ENABLED
Definition: DRW_render.h:366
#define DRW_STATE_STENCIL_TEST_ENABLED
Definition: DRW_render.h:372
#define DRW_STATE_DEPTH_TEST_ENABLED
Definition: DRW_render.h:369
#define DRW_STATE_BLEND_ENABLED
Definition: DRW_render.h:362
#define DRW_STATE_DEFAULT
Definition: DRW_render.h:360
static AppView * view
GPUBatch
Definition: GPU_batch.h:93
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
Definition: gpu_batch.cc:222
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count)
Definition: gpu_batch.cc:255
void GPU_draw_list_append(GPUDrawList *list, GPUBatch *batch, int i_first, int i_count)
Definition: gpu_drawlist.cc:49
void GPU_draw_list_submit(GPUDrawList *list)
Definition: gpu_drawlist.cc:55
GPUFrameBuffer * GPU_framebuffer_active_get(void)
@ GPU_DRIVER_OFFICIAL
Definition: GPU_platform.h:53
@ GPU_OS_ANY
Definition: GPU_platform.h:49
@ GPU_DEVICE_ATI
Definition: GPU_platform.h:34
bool GPU_type_matches(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver)
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
bool GPU_select_load_id(unsigned int id)
Definition: gpu_select.c:108
void GPU_shader_unbind(void)
Definition: gpu_shader.cc:516
void GPU_shader_uniform_vector_int(GPUShader *shader, int location, int length, int arraysize, const int *value)
Definition: gpu_shader.cc:623
void GPU_shader_uniform_vector(GPUShader *shader, int location, int length, int arraysize, const float *value)
Definition: gpu_shader.cc:617
bool GPU_shader_transform_feedback_enable(GPUShader *shader, struct GPUVertBuf *vertbuf)
Definition: gpu_shader.cc:535
void GPU_shader_uniform_int(GPUShader *shader, int location, int value)
Definition: gpu_shader.cc:629
void GPU_shader_transform_feedback_disable(GPUShader *shader)
Definition: gpu_shader.cc:540
void GPU_shader_bind(GPUShader *shader)
Definition: gpu_shader.cc:494
void GPU_program_point_size(bool enable)
Definition: gpu_state.cc:191
eGPUBlend
Definition: GPU_state.h:54
@ GPU_BLEND_ADDITIVE_PREMULT
Definition: GPU_state.h:60
@ GPU_BLEND_INVERT
Definition: GPU_state.h:65
@ GPU_BLEND_OIT
Definition: GPU_state.h:68
@ GPU_BLEND_MULTIPLY
Definition: GPU_state.h:61
@ GPU_BLEND_NONE
Definition: GPU_state.h:55
@ GPU_BLEND_ALPHA
Definition: GPU_state.h:57
@ GPU_BLEND_CUSTOM
Definition: GPU_state.h:73
@ GPU_BLEND_ADDITIVE
Definition: GPU_state.h:59
@ GPU_BLEND_SUBTRACT
Definition: GPU_state.h:62
@ GPU_BLEND_ALPHA_UNDER_PREMUL
Definition: GPU_state.h:74
@ GPU_BLEND_BACKGROUND
Definition: GPU_state.h:70
@ GPU_BLEND_ALPHA_PREMULT
Definition: GPU_state.h:58
void GPU_line_width(float width)
Definition: gpu_state.cc:173
eGPUWriteMask
Definition: GPU_state.h:25
@ GPU_WRITE_STENCIL
Definition: GPU_state.h:32
@ GPU_WRITE_DEPTH
Definition: GPU_state.h:31
@ GPU_WRITE_COLOR
Definition: GPU_state.h:33
void GPU_line_smooth(bool enable)
Definition: gpu_state.cc:85
eGPUProvokingVertex
Definition: GPU_state.h:108
@ GPU_VERTEX_LAST
Definition: GPU_state.h:109
@ GPU_VERTEX_FIRST
Definition: GPU_state.h:110
void GPU_stencil_write_mask_set(uint write_mask)
Definition: gpu_state.cc:221
eGPUFaceCullTest
Definition: GPU_state.h:102
@ GPU_CULL_FRONT
Definition: GPU_state.h:104
@ GPU_CULL_NONE
Definition: GPU_state.h:103
@ GPU_CULL_BACK
Definition: GPU_state.h:105
void GPU_depth_range(float near, float far)
Definition: gpu_state.cc:161
void GPU_stencil_reference_set(uint reference)
Definition: gpu_state.cc:216
eGPUStencilOp
Definition: GPU_state.h:94
@ GPU_STENCIL_OP_COUNT_DEPTH_FAIL
Definition: GPU_state.h:99
@ GPU_STENCIL_OP_COUNT_DEPTH_PASS
Definition: GPU_state.h:98
@ GPU_STENCIL_OP_REPLACE
Definition: GPU_state.h:96
@ GPU_STENCIL_OP_NONE
Definition: GPU_state.h:95
void GPU_stencil_compare_mask_set(uint compare_mask)
Definition: gpu_state.cc:226
void GPU_front_facing(bool invert)
Definition: gpu_state.cc:65
void GPU_point_size(float size)
Definition: gpu_state.cc:179
bool GPU_depth_mask_get(void)
Definition: gpu_state.cc:293
eGPUDepthTest
Definition: GPU_state.h:77
@ GPU_DEPTH_GREATER
Definition: GPU_state.h:83
@ GPU_DEPTH_EQUAL
Definition: GPU_state.h:82
@ GPU_DEPTH_ALWAYS
Definition: GPU_state.h:79
@ GPU_DEPTH_GREATER_EQUAL
Definition: GPU_state.h:84
@ GPU_DEPTH_LESS
Definition: GPU_state.h:80
@ GPU_DEPTH_LESS_EQUAL
Definition: GPU_state.h:81
@ GPU_DEPTH_NONE
Definition: GPU_state.h:78
eGPUDepthTest GPU_depth_test_get(void)
Definition: gpu_state.cc:255
void GPU_state_set(eGPUWriteMask write_mask, eGPUBlend blend, eGPUFaceCullTest culling_test, eGPUDepthTest depth_test, eGPUStencilTest stencil_test, eGPUStencilOp stencil_op, eGPUProvokingVertex provoking_vert)
Definition: gpu_state.cc:136
eGPUStencilTest
Definition: GPU_state.h:87
@ GPU_STENCIL_EQUAL
Definition: GPU_state.h:90
@ GPU_STENCIL_NEQUAL
Definition: GPU_state.h:91
@ GPU_STENCIL_ALWAYS
Definition: GPU_state.h:89
@ GPU_STENCIL_NONE
Definition: GPU_state.h:88
void GPU_clip_distances(int distances_enabled)
Definition: gpu_state.cc:131
void GPU_provoking_vertex(eGPUProvokingVertex vert)
Definition: gpu_state.cc:70
void GPU_shadow_offset(bool enable)
Definition: gpu_state.cc:126
void GPU_texture_unbind_all(void)
Definition: gpu_texture.cc:427
void GPU_texture_image_bind(GPUTexture *tex, int unit)
Definition: gpu_texture.cc:432
void GPU_texture_bind_ex(GPUTexture *tex, eGPUSamplerState state, int unit, const bool set_number)
void GPU_uniformbuf_unbind_all(void)
void GPU_uniformbuf_unbind(GPUUniformBuf *ubo)
void GPU_uniformbuf_update(GPUUniformBuf *ubo, const void *data)
void GPU_uniformbuf_bind(GPUUniformBuf *ubo, int slot)
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
struct GPUVertBuf GPUVertBuf
void * GPU_vertbuf_get_data(const GPUVertBuf *verts)
ATTR_WARN_UNUSED_RESULT const BMVert * v
struct DRW_Global G_draw
Definition: draw_common.c:45
void DRW_debug_sphere(const float center[3], const float radius, const float color[4])
Definition: draw_debug.c:126
DRWSparseUniformBuf * DRW_uniform_attrs_pool_find_ubo(GHash *table, struct GPUUniformAttrList *key)
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
DRWManager DST
Definition: draw_manager.c:111
@ DRW_UNIFORM_BLOCK_OBINFOS
Definition: draw_manager.h:291
@ DRW_UNIFORM_TFEEDBACK_TARGET
Definition: draw_manager.h:288
@ DRW_UNIFORM_TEXTURE_REF
Definition: draw_manager.h:283
@ DRW_UNIFORM_MODEL_MATRIX
Definition: draw_manager.h:297
@ DRW_UNIFORM_FLOAT_COPY
Definition: draw_manager.h:281
@ DRW_UNIFORM_MODEL_MATRIX_INVERSE
Definition: draw_manager.h:298
@ DRW_UNIFORM_FLOAT
Definition: draw_manager.h:280
@ DRW_UNIFORM_BASE_INSTANCE
Definition: draw_manager.h:296
@ DRW_UNIFORM_BLOCK_OBMATS
Definition: draw_manager.h:290
@ DRW_UNIFORM_IMAGE_REF
Definition: draw_manager.h:285
@ DRW_UNIFORM_RESOURCE_ID
Definition: draw_manager.h:294
@ DRW_UNIFORM_BLOCK
Definition: draw_manager.h:286
@ DRW_UNIFORM_TEXTURE
Definition: draw_manager.h:282
@ DRW_UNIFORM_RESOURCE_CHUNK
Definition: draw_manager.h:293
@ DRW_UNIFORM_IMAGE
Definition: draw_manager.h:284
@ DRW_UNIFORM_BLOCK_OBATTRS
Definition: draw_manager.h:292
@ DRW_UNIFORM_INT
Definition: draw_manager.h:278
@ DRW_UNIFORM_BLOCK_REF
Definition: draw_manager.h:287
@ DRW_UNIFORM_INT_COPY
Definition: draw_manager.h:279
BLI_INLINE uint32_t DRW_handle_chunk_get(const DRWResourceHandle *handle)
Definition: draw_manager.h:138
eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
#define USE_BATCHING
Definition: draw_manager.h:53
BLI_INLINE void * DRW_memblock_elem_from_handle(struct BLI_memblock *memblock, const DRWResourceHandle *handle)
Definition: draw_manager.h:158
eDRWCommandType
Definition: draw_manager.h:183
@ DRW_CMD_DRAW
Definition: draw_manager.h:185
@ DRW_CMD_DRWSTATE
Definition: draw_manager.h:192
@ DRW_CMD_DRAW_RANGE
Definition: draw_manager.h:186
@ DRW_CMD_CLEAR
Definition: draw_manager.h:191
@ DRW_CMD_STENCIL
Definition: draw_manager.h:193
@ DRW_CMD_DRAW_INSTANCE_RANGE
Definition: draw_manager.h:188
@ DRW_CMD_DRAW_PROCEDURAL
Definition: draw_manager.h:189
@ DRW_CMD_SELECTID
Definition: draw_manager.h:194
@ DRW_CMD_DRAW_INSTANCE
Definition: draw_manager.h:187
BLI_INLINE uint32_t DRW_handle_negative_scale_get(const DRWResourceHandle *handle)
Definition: draw_manager.h:133
BLI_INLINE uint32_t DRW_handle_id_get(const DRWResourceHandle *handle)
Definition: draw_manager.h:143
uint32_t DRWResourceHandle
Definition: draw_manager.h:131
BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup, DRWResourceHandle *handle, float obmat_loc, float obinv_loc)
void DRW_view_set_active(DRWView *view)
static void draw_call_single_do(DRWShadingGroup *shgroup, DRWCommandsState *state, GPUBatch *batch, DRWResourceHandle handle, int vert_first, int vert_count, int inst_first, int inst_count, bool do_base_instance)
bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState *state)
static DRWCommand * draw_command_iter_step(DRWCommandIterator *iter, eDRWCommandType *cmd_type)
static bool draw_culling_sphere_test(const BoundSphere *frustum_bsphere, const float(*frustum_planes)[4], const BoundSphere *bsphere)
static void drw_state_validate(void)
static void draw_update_uniforms(DRWShadingGroup *shgroup, DRWCommandsState *state, bool *use_tfeedback)
bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup, GPUBatch *geom, int vert_first, int vert_count, int inst_first, int inst_count, int baseinst_loc)
static void draw_call_batching_flush(DRWShadingGroup *shgroup, DRWCommandsState *state)
BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup, DRWCommandsState *state, GPUBatch *batch, const DRWResourceHandle *handle)
void DRW_draw_pass(DRWPass *pass)
static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4])
void drw_state_set(DRWState state)
void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4])
static void draw_call_batching_do(DRWShadingGroup *shgroup, DRWCommandsState *state, DRWCommandDraw *call)
static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_mask)
void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners)
static void draw_command_iter_begin(DRWCommandIterator *iter, DRWShadingGroup *shgroup)
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
void DRW_state_reset(void)
struct DRWCommandsState DRWCommandsState
static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
void DRW_state_reset_ex(DRWState state)
static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHandle *handle)
static void drw_update_view(void)
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *state)
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
void DRW_state_lock(DRWState state)
bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
static bool draw_culling_box_test(const float(*frustum_planes)[4], const BoundBox *bbox)
bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
void DRW_select_load_id(uint id)
struct DRWCommandIterator DRWCommandIterator
static void draw_compute_culling(DRWView *view)
static void draw_call_batching_start(DRWCommandsState *state)
void DRW_stats_query_end(void)
void DRW_stats_query_start(const char *name)
GPUBatch * batch
Definition: drawnode.c:3779
void GPU_framebuffer_clear(GPUFrameBuffer *gpu_fb, eGPUFrameBufferBits buffers, const float clear_col[4], float clear_depth, uint clear_stencil)
int count
static ulong state[N]
static unsigned c
Definition: RandGen.cpp:97
#define min(a, b)
Definition: sort.c:51
float vec[8][3]
float center[3]
Definition: DRW_render.h:87
float radius
Definition: DRW_render.h:87
struct DRWCommandChunk * next
Definition: draw_manager.h:442
uint32_t command_len
Definition: draw_manager.h:443
uint32_t command_used
Definition: draw_manager.h:444
DRWCommand commands[96]
Definition: draw_manager.h:448
uint64_t command_type[6]
Definition: draw_manager.h:446
eGPUFrameBufferBits clear_channels
Definition: draw_manager.h:251
DRWResourceHandle handle
Definition: draw_manager.h:222
DRWResourceHandle handle
Definition: draw_manager.h:215
DRWResourceHandle handle
Definition: draw_manager.h:229
DRWResourceHandle handle
Definition: draw_manager.h:208
GPUBatch * batch
Definition: draw_manager.h:201
DRWResourceHandle handle
Definition: draw_manager.h:202
DRWCommandChunk * curr_chunk
GPUVertBuf * select_buf
Definition: draw_manager.h:246
GPUVertBuf * select_buf
DRWSparseUniformBuf * obattrs_ubo
DRWState drw_state_disabled
DRWState drw_state_enabled
BoundSphere bsphere
Definition: draw_manager.h:106
uint select_id
Definition: draw_manager.h:559
DRWView * view_active
Definition: draw_manager.h:551
DRWView * view_previous
Definition: draw_manager.h:552
bool buffer_finish_called
Definition: draw_manager.h:548
DRWState state
Definition: draw_manager.h:517
GPUBatch * batch
Definition: draw_manager.h:514
GPUShader * shader
Definition: draw_manager.h:513
ViewportMemoryPool * vmempool
Definition: draw_manager.h:490
GPUDrawList * draw_list
Definition: draw_manager.h:576
DRWState state_lock
Definition: draw_manager.h:518
DRWView * view_default
Definition: draw_manager.h:550
DRWViewUboStorage view_storage_cpy
Definition: draw_manager.h:556
float modelinverse[4][4]
Definition: draw_manager.h:168
float model[4][4]
Definition: draw_manager.h:167
DRWState state
Definition: draw_manager.h:379
char name[MAX_PASS_NAME]
Definition: draw_manager.h:380
DRWShadingGroup * last
Definition: draw_manager.h:368
DRWShadingGroup * first
Definition: draw_manager.h:367
DRWPass * original
Definition: draw_manager.h:374
struct DRWPass::@294 shgroups
DRWPass * next
Definition: draw_manager.h:376
struct DRWCommandChunk * first
Definition: draw_manager.h:341
struct DRWUniformChunk * uniforms
Definition: draw_manager.h:337
DRWShadingGroup * next
Definition: draw_manager.h:334
struct DRWShadingGroup::@288 cmd
GPUShader * shader
Definition: draw_manager.h:336
struct DRWUniformChunk * next
Definition: draw_manager.h:435
int ivalue[4]
Definition: draw_manager.h:323
uint8_t arraysize
Definition: draw_manager.h:330
struct GPUUniformAttrList * uniform_attrs
Definition: draw_manager.h:325
GPUTexture ** texture_ref
Definition: draw_manager.h:311
GPUUniformBuf ** block_ref
Definition: draw_manager.h:318
GPUUniformBuf * block
Definition: draw_manager.h:317
uint8_t type
Definition: draw_manager.h:328
uint8_t length
Definition: draw_manager.h:329
GPUTexture * texture
Definition: draw_manager.h:310
eGPUSamplerState sampler_state
Definition: draw_manager.h:313
float fvalue[4]
Definition: draw_manager.h:321
const void * pvalue
Definition: draw_manager.h:306
DRWViewUboStorage storage
Definition: draw_manager.h:407
int clip_planes_len
Definition: draw_manager.h:409
bool is_dirty
Definition: draw_manager.h:411
bool is_inverted
Definition: draw_manager.h:413
struct GPUUniformBuf * view_ubo
Definition: draw_common.h:215
struct BLI_memblock * obmats
Definition: GPU_viewport.h:51
struct BLI_memblock * cullstates
Definition: GPU_viewport.h:53
struct GPUUniformBuf ** matrices_ubo
Definition: GPU_viewport.h:59
struct GHash * obattrs_ubo_pool
Definition: GPU_viewport.h:61
struct GPUUniformBuf ** obinfos_ubo
Definition: GPU_viewport.h:60
static int blend(const Tex *tex, const float texvec[3], TexResult *texres)
float max
DRWCommandSetStencil stencil
Definition: draw_manager.h:264
DRWCommandDraw draw
Definition: draw_manager.h:258
DRWCommandDrawInstance instance
Definition: draw_manager.h:260
DRWCommandDrawRange range
Definition: draw_manager.h:259
DRWCommandSetMutableState state
Definition: draw_manager.h:263
DRWCommandDrawInstanceRange instance_range
Definition: draw_manager.h:261
DRWCommandClear clear
Definition: draw_manager.h:266
DRWCommandDrawProcedural procedural
Definition: draw_manager.h:262
DRWCommandSetSelectID select_id
Definition: draw_manager.h:265
#define G(x, y, z)