Blender  V2.93
draw_cache_impl_mesh.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2017 by Blender Foundation.
17  * All rights reserved.
18  */
19 
26 #include "MEM_guardedalloc.h"
27 
28 #include "BLI_alloca.h"
29 #include "BLI_bitmap.h"
30 #include "BLI_buffer.h"
31 #include "BLI_edgehash.h"
32 #include "BLI_listbase.h"
33 #include "BLI_math_bits.h"
34 #include "BLI_math_vector.h"
35 #include "BLI_string.h"
36 #include "BLI_task.h"
37 #include "BLI_utildefines.h"
38 
39 #include "DNA_mesh_types.h"
40 #include "DNA_meshdata_types.h"
41 #include "DNA_object_types.h"
42 #include "DNA_scene_types.h"
43 
44 #include "BKE_customdata.h"
45 #include "BKE_deform.h"
46 #include "BKE_editmesh.h"
47 #include "BKE_editmesh_cache.h"
48 #include "BKE_editmesh_tangent.h"
49 #include "BKE_mesh.h"
50 #include "BKE_mesh_runtime.h"
51 #include "BKE_mesh_tangent.h"
52 #include "BKE_modifier.h"
53 #include "BKE_object_deform.h"
54 #include "BKE_paint.h"
55 #include "BKE_pbvh.h"
56 
57 #include "atomic_ops.h"
58 
59 #include "bmesh.h"
60 
61 #include "GPU_batch.h"
62 #include "GPU_material.h"
63 
64 #include "DRW_render.h"
65 
66 #include "ED_mesh.h"
67 #include "ED_uvedit.h"
68 
69 #include "draw_cache_extract.h"
70 #include "draw_cache_inline.h"
71 
72 #include "draw_cache_impl.h" /* own include */
73 
74 static void mesh_batch_cache_clear(Mesh *me);
75 
/* Return true if all layers in _b_ are inside _a_ (i.e. _b_ is a subset of _a_).
 * Compares two DRW_MeshCDMask bit-field structs by reinterpreting them as
 * 64-bit integers.
 * NOTE(review): the signature line is missing from this excerpt; it takes two
 * DRW_MeshCDMask values by value -- confirm against the upstream file. */
{
  /* (b & a) == b  <=>  every bit set in b is also set in a. */
  return (*((uint64_t *)&a) & *((uint64_t *)&b)) == *((uint64_t *)&b);
}
81 
/* Return true when the two custom-data masks have exactly the same bits set.
 * NOTE(review): the signature line is missing from this excerpt; it takes two
 * DRW_MeshCDMask values by value -- confirm against the upstream file. */
{
  return *((uint64_t *)&a) == *((uint64_t *)&b);
}
86 
/* OR the layer bits of `b` into the mask pointed to by `a`.
 * Done as two 32-bit atomic OR operations (the mask is 64 bits wide);
 * the atomics suggest this may be called from multiple extraction threads
 * concurrently -- TODO(review): confirm against callers.
 * NOTE(review): the signature line is missing from this excerpt. */
{
  uint32_t *a_p = (uint32_t *)a;
  uint32_t *b_p = (uint32_t *)&b;
  atomic_fetch_and_or_uint32(a_p, *b_p);
  atomic_fetch_and_or_uint32(a_p + 1, *(b_p + 1));
}
94 
/* Zero every layer-usage bit of the mask pointed to by `a`.
 * NOTE(review): the signature line is missing from this excerpt. */
{
  *((uint64_t *)a) = 0;
}
99 
/* Return the evaluated final mesh of the edit-mesh when one exists,
 * otherwise the mesh itself.
 * NOTE(review): the signature line is missing from this excerpt;
 * presumably `static const Mesh *editmesh_final_or_this(const Mesh *me)`. */
{
  return (me->edit_mesh && me->edit_mesh->mesh_eval_final) ? me->edit_mesh->mesh_eval_final : me;
}
104 
/* Request the edit-UV data layer. The mesh itself is not inspected:
 * edit-UV data is unconditionally flagged as needed when requested. */
static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
{
  cd_used->edit_uv = 1;
}
109 
/* Return the loop custom-data to read layers (UVs, loop colors) from,
 * depending on the mesh wrapper type: the mesh's own `ldata` for plain mesh
 * data, or the edit-mode BMesh's `ldata` for the BMesh wrapper.
 * NOTE(review): the signature line and the switch `case` labels were lost in
 * this excerpt (likely ME_WRAPPER_TYPE_MDATA / ME_WRAPPER_TYPE_BMESH) --
 * confirm against the upstream file. */
{
  switch ((eMeshWrapperType)me->runtime.wrapper_type) {
    /* Plain mesh data. */
      return &me->ldata;
      break;
    /* Edit-mode wrapper: read from the BMesh. */
      return &me->edit_mesh->bm->ldata;
      break;
  }

  /* Unknown wrapper type: fall back to the mesh's own loop data. */
  BLI_assert(0);
  return &me->ldata;
}
124 
/* Return the vertex custom-data to read layers (e.g. CD_PROP_COLOR) from,
 * depending on the mesh wrapper type: the mesh's own `vdata` for plain mesh
 * data, or the edit-mode BMesh's `vdata` for the BMesh wrapper.
 * NOTE(review): the signature line and the switch `case` labels were lost in
 * this excerpt (likely ME_WRAPPER_TYPE_MDATA / ME_WRAPPER_TYPE_BMESH) --
 * confirm against the upstream file. */
{
  switch ((eMeshWrapperType)me->runtime.wrapper_type) {
    /* Plain mesh data. */
      return &me->vdata;
      break;
    /* Edit-mode wrapper: read from the BMesh. */
      return &me->edit_mesh->bm->vdata;
      break;
  }

  /* Unknown wrapper type: fall back to the mesh's own vertex data. */
  BLI_assert(0);
  return &me->vdata;
}
139 
140 static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
141 {
142  const Mesh *me_final = editmesh_final_or_this(me);
143  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
144  int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
145  if (layer != -1) {
146  cd_used->uv |= (1 << layer);
147  }
148 }
149 
150 static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
151 {
152  const Mesh *me_final = editmesh_final_or_this(me);
153  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
154  int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
155  if (layer != -1) {
156  cd_used->uv |= (1 << layer);
157  }
158 }
159 
160 static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
161 {
162  const Mesh *me_final = editmesh_final_or_this(me);
163  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
164 
165  int layer = CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR);
166  if (layer != -1) {
167  cd_used->sculpt_vcol |= (1 << layer);
168  }
169 }
170 
/* Flag the active loop vertex-color (CD_MLOOPCOL) layer as used, if any.
 * NOTE(review): the signature line is missing from this excerpt; by analogy
 * with the functions above it takes (const Mesh *me, DRW_MeshCDMask *cd_used). */
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);

  int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
  if (layer != -1) {
    cd_used->vcol |= (1 << layer);
  }
}
181 
183  struct GPUMaterial **gpumat_array,
184  int gpumat_array_len)
185 {
186  const Mesh *me_final = editmesh_final_or_this(me);
187  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
188  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
189 
190  /* See: DM_vertex_attributes_from_gpu for similar logic */
191  DRW_MeshCDMask cd_used;
192  mesh_cd_layers_type_clear(&cd_used);
193 
194  for (int i = 0; i < gpumat_array_len; i++) {
195  GPUMaterial *gpumat = gpumat_array[i];
196  if (gpumat) {
197  ListBase gpu_attrs = GPU_material_attributes(gpumat);
198  LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
199  const char *name = gpu_attr->name;
200  int type = gpu_attr->type;
201  int layer = -1;
202 
203  if (type == CD_AUTO_FROM_NAME) {
204  /* We need to deduct what exact layer is used.
205  *
206  * We do it based on the specified name.
207  */
208  if (name[0] != '\0') {
209  layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
210  type = CD_MTFACE;
211 
212  if (layer == -1) {
213  if (U.experimental.use_sculpt_vertex_colors) {
214  layer = CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name);
216  }
217  }
218 
219  if (layer == -1) {
220  layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
221  type = CD_MCOL;
222  }
223 
224 #if 0 /* Tangents are always from UV's - this will never happen. */
225  if (layer == -1) {
226  layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
227  type = CD_TANGENT;
228  }
229 #endif
230  if (layer == -1) {
231  continue;
232  }
233  }
234  else {
235  /* Fall back to the UV layer, which matches old behavior. */
236  type = CD_MTFACE;
237  }
238  }
239 
240  switch (type) {
241  case CD_MTFACE: {
242  if (layer == -1) {
243  layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
245  }
246  if (layer != -1) {
247  cd_used.uv |= (1 << layer);
248  }
249  break;
250  }
251  case CD_TANGENT: {
252  if (layer == -1) {
253  layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
255 
256  /* Only fallback to orco (below) when we have no UV layers, see: T56545 */
257  if (layer == -1 && name[0] != '\0') {
258  layer = CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
259  }
260  }
261  if (layer != -1) {
262  cd_used.tan |= (1 << layer);
263  }
264  else {
265  /* no UV layers at all => requesting orco */
266  cd_used.tan_orco = 1;
267  cd_used.orco = 1;
268  }
269  break;
270  }
271  case CD_PROP_COLOR: {
272  /* Sculpt Vertex Colors */
273  bool use_mloop_cols = false;
274  if (layer == -1) {
275  layer = (name[0] != '\0') ?
276  CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name) :
278  /* Fallback to Vertex Color data */
279  if (layer == -1) {
280  layer = (name[0] != '\0') ?
281  CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
283  use_mloop_cols = true;
284  }
285  }
286  if (layer != -1) {
287  if (use_mloop_cols) {
288  cd_used.vcol |= (1 << layer);
289  }
290  else {
291  cd_used.sculpt_vcol |= (1 << layer);
292  }
293  }
294  break;
295  }
296  case CD_MCOL: {
297  /* Vertex Color Data */
298  if (layer == -1) {
299  layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
301  }
302  if (layer != -1) {
303  cd_used.vcol |= (1 << layer);
304  }
305 
306  break;
307  }
308  case CD_ORCO: {
309  cd_used.orco = 1;
310  break;
311  }
312  }
313  }
314  }
315  }
316  return cd_used;
317 }
318 
321 /* ---------------------------------------------------------------------- */
/* Free owned arrays and reset the weight state to defaults (no active group).
 * NOTE(review): the signature line and two lines (presumably freeing
 * defgroup_locked / defgroup_unlocked) were lost in this excerpt -- confirm
 * against the upstream file. */
{
  MEM_SAFE_FREE(wstate->defgroup_sel);

  memset(wstate, 0, sizeof(*wstate));

  /* -1 means no deform group is active. */
  wstate->defgroup_active = -1;
}
336 
338 static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst,
339  const struct DRW_MeshWeightState *wstate_src)
340 {
341  MEM_SAFE_FREE(wstate_dst->defgroup_sel);
342  MEM_SAFE_FREE(wstate_dst->defgroup_locked);
343  MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
344 
345  memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
346 
347  if (wstate_src->defgroup_sel) {
348  wstate_dst->defgroup_sel = MEM_dupallocN(wstate_src->defgroup_sel);
349  }
350  if (wstate_src->defgroup_locked) {
351  wstate_dst->defgroup_locked = MEM_dupallocN(wstate_src->defgroup_locked);
352  }
353  if (wstate_src->defgroup_unlocked) {
354  wstate_dst->defgroup_unlocked = MEM_dupallocN(wstate_src->defgroup_unlocked);
355  }
356 }
357 
/* Return true when both flag arrays have identical contents, or when both are
 * NULL. A NULL array never compares equal to a non-NULL one. */
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  if (array1 == NULL || array2 == NULL) {
    /* Equal only if both are NULL. */
    return array1 == array2;
  }
  return memcmp(array1, array2, size * sizeof(bool)) == 0;
}
363 
/* Compare two weight states for equality: all scalar fields plus the three
 * per-defgroup flag arrays. Used to detect when the weight display batches
 * must be rebuilt (see mesh_batch_cache_check_vertex_group).
 * NOTE(review): the first line of the signature is missing from this excerpt;
 * presumably `static bool drw_mesh_weight_state_compare(
 *     const struct DRW_MeshWeightState *a, ...)`. */
                                         const struct DRW_MeshWeightState *b)
{
  return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
         a->flags == b->flags && a->alert_mode == b->alert_mode &&
         a->defgroup_sel_count == b->defgroup_sel_count &&
         drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
         drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
         drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
}
375 
377  Mesh *me,
378  const ToolSettings *ts,
379  bool paint_mode,
380  struct DRW_MeshWeightState *wstate)
381 {
382  /* Extract complete vertex weight group selection state and mode flags. */
383  memset(wstate, 0, sizeof(*wstate));
384 
385  wstate->defgroup_active = ob->actdef - 1;
386  wstate->defgroup_len = BLI_listbase_count(&ob->defbase);
387 
388  wstate->alert_mode = ts->weightuser;
389 
390  if (paint_mode && ts->multipaint) {
391  /* Multi-paint needs to know all selected bones, not just the active group.
392  * This is actually a relatively expensive operation, but caching would be difficult. */
394  ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
395 
396  if (wstate->defgroup_sel_count > 1) {
399 
402  wstate->defgroup_len,
403  wstate->defgroup_sel,
404  wstate->defgroup_sel,
405  &wstate->defgroup_sel_count);
406  }
407  }
408  /* With only one selected bone Multi-paint reverts to regular mode. */
409  else {
410  wstate->defgroup_sel_count = 0;
411  MEM_SAFE_FREE(wstate->defgroup_sel);
412  }
413  }
414 
415  if (paint_mode && ts->wpaint_lock_relative) {
416  /* Set of locked vertex groups for the lock relative mode. */
419 
420  /* Check that a deform group is active, and none of selected groups are locked. */
422  wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
424  wstate->defgroup_locked,
425  wstate->defgroup_sel,
426  wstate->defgroup_sel_count)) {
428 
429  /* Compute the set of locked and unlocked deform vertex groups. */
431  wstate->defgroup_locked,
432  wstate->defgroup_unlocked,
433  wstate->defgroup_locked, /* out */
434  wstate->defgroup_unlocked);
435  }
436  else {
439  }
440  }
441 }
442 
445 /* ---------------------------------------------------------------------- */
450 {
451  atomic_fetch_and_or_uint32((uint32_t *)(&cache->batch_requested), *(uint32_t *)&new_flag);
452 }
453 
454 /* GPUBatch cache management. */
455 
456 static bool mesh_batch_cache_valid(Mesh *me)
457 {
458  MeshBatchCache *cache = me->runtime.batch_cache;
459 
460  if (cache == NULL) {
461  return false;
462  }
463 
464  if (cache->is_editmode != (me->edit_mesh != NULL)) {
465  return false;
466  }
467 
468  if (cache->is_dirty) {
469  return false;
470  }
471 
472  if (cache->mat_len != mesh_render_mat_len_get(me)) {
473  return false;
474  }
475 
476  return true;
477 }
478 
/* (Re)initialize the batch cache of `me`: allocate it on first use, otherwise
 * zero it in place, then set up per-material batch arrays and reset flags.
 * NOTE(review): one trailing line was lost in this excerpt (possibly a
 * drw_mesh_weight_state_clear() call) -- confirm against the upstream file. */
static void mesh_batch_cache_init(Mesh *me)
{
  MeshBatchCache *cache = me->runtime.batch_cache;

  if (!cache) {
    cache = me->runtime.batch_cache = MEM_callocN(sizeof(*cache), __func__);
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  cache->is_editmode = me->edit_mesh != NULL;

  if (cache->is_editmode == false) {
    // cache->edge_len = mesh_render_edges_len_get(me);
    // cache->tri_len = mesh_render_looptri_len_get(me);
    // cache->poly_len = mesh_render_polys_len_get(me);
    // cache->vert_len = mesh_render_verts_len_get(me);
  }

  /* One surface batch and one triangle index range per material slot. */
  cache->mat_len = mesh_render_mat_len_get(me);
  cache->surface_per_mat = MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__);
  cache->final.tris_per_mat = MEM_callocN(sizeof(*cache->final.tris_per_mat) * cache->mat_len,
                                          __func__);

  cache->is_dirty = false;
  cache->batch_ready = 0;
  cache->batch_requested = 0;

}
510 
/* Rebuild the batch cache when it is no longer valid.
 * NOTE(review): the signature line and the body of the `if` (presumably
 * mesh_batch_cache_clear() followed by mesh_batch_cache_init()) were lost in
 * this excerpt -- confirm against the upstream file. */
{
  if (!mesh_batch_cache_valid(me)) {
  }
}
518 
/* Return the mesh's batch cache; assumes it was validated beforehand.
 * NOTE(review): the signature line is missing from this excerpt; presumably
 * `static MeshBatchCache *mesh_batch_cache_get(Mesh *me)`. */
{
  return me->runtime.batch_cache;
}
523 
/* Discard weight-related GPU data when the vertex-group weight state differs
 * from the one the cache was built with, forcing a rebuild.
 * NOTE(review): the first signature line and two body lines (likely a batch
 * discard and a weight-state clear) were lost in this excerpt -- confirm
 * against the upstream file. */
                                                const struct DRW_MeshWeightState *wstate)
{
  if (!drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
    /* Drop the weights VBO of every buffer cache. */
    FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
      GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.weights);
    }

    cache->batch_ready &= ~MBC_SURFACE_WEIGHTS;

  }
}
538 
540 {
543  for (int i = 0; i < cache->mat_len; i++) {
545  }
546 }
547 
549 {
551  for (int i = 0; i < cache->mat_len; i++) {
553  }
554  cache->batch_ready &= ~MBC_SURFACE;
555 }
556 
558 {
559  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
560  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
561  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.tan);
562  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
563  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
564  }
565  /* Discard batches using vbo.uv. */
571 
574 }
575 
577 {
578  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
579  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_stretch_angle);
580  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_stretch_area);
581  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
582  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
583  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_uv);
584  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
585  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_tris);
586  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_lines);
587  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
588  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
589  }
597 
598  cache->tot_area = 0.0f;
599  cache->tot_uv_area = 0.0f;
600 
601  cache->batch_ready &= ~MBC_EDITUV;
602 
603  /* We discarded the vbo.uv so we need to reset the cd_used flag. */
604  cache->cd_used.uv = 0;
605  cache->cd_used.edit_uv = 0;
606 
607  /* Discard other batches that uses vbo.uv */
609 }
610 
612 {
613  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
614  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
615  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
616  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_tris);
617  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_lines);
618  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
619  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
620  }
628  cache->batch_ready &= ~MBC_EDITUV;
629 }
630 
632 {
633  MeshBatchCache *cache = me->runtime.batch_cache;
634  if (cache == NULL) {
635  return;
636  }
637  switch (mode) {
639  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
640  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edit_data);
641  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_nor);
642  }
656  /* Because visible UVs depends on edit mode selection, discard topology. */
658  break;
660  /* Paint mode selection flag is packed inside the nor attribute.
661  * Note that it can be slow if auto smooth is enabled. (see T63946) */
662  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
663  GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.lines_paint_mask);
664  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.pos_nor);
665  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.lnor);
666  }
668  /* Discard batches using vbo.pos_nor. */
677  /* Discard batches using vbo.lnor. */
681  break;
683  cache->is_dirty = true;
684  break;
688  break;
691  break;
693  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
694  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
695  GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
696  }
703  cache->batch_ready &= ~MBC_EDITUV;
704  break;
705  default:
706  BLI_assert(0);
707  }
708 }
709 
/* Free every GPU resource owned by the mesh batch cache, leaving the cache
 * struct itself allocated but emptied.
 * NOTE(review): several lines were lost in this excerpt (the bodies of the
 * per-material and per-batch discard loops, plus a few trailing cleanup
 * calls) -- confirm against the upstream file. */
static void mesh_batch_cache_clear(Mesh *me)
{
  MeshBatchCache *cache = me->runtime.batch_cache;
  if (!cache) {
    return;
  }
  /* Discard every VBO/IBO by treating the `vbo`/`ibo` structs as flat arrays
   * of GPU buffer pointers. */
  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
    GPUVertBuf **vbos = (GPUVertBuf **)&mbufcache->vbo;
    GPUIndexBuf **ibos = (GPUIndexBuf **)&mbufcache->ibo;
    for (int i = 0; i < sizeof(mbufcache->vbo) / sizeof(void *); i++) {
      GPU_VERTBUF_DISCARD_SAFE(vbos[i]);
    }
    for (int i = 0; i < sizeof(mbufcache->ibo) / sizeof(void *); i++) {
      GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
    }
  }

  /* Per-material cleanup (loop body missing from this excerpt). */
  for (int i = 0; i < cache->mat_len; i++) {
  }

  /* Per-batch discard (loop body missing from this excerpt). */
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    GPUBatch **batch = (GPUBatch **)&cache->batch;
  }

  cache->mat_len = 0;

  cache->batch_ready = 0;
}
745 
747 {
750 }
751 
754 /* ---------------------------------------------------------------------- */
759 {
760  DRW_MeshCDMask cd_needed;
761  mesh_cd_layers_type_clear(&cd_needed);
762  mesh_cd_calc_active_uv_layer(me, &cd_needed);
763 
764  BLI_assert(cd_needed.uv != 0 &&
765  "No uv layer available in texpaint, but batches requested anyway!");
766 
767  mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
768  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
769 }
770 
772 {
773  DRW_MeshCDMask cd_needed;
774  mesh_cd_layers_type_clear(&cd_needed);
775  mesh_cd_calc_active_mloopcol_layer(me, &cd_needed);
776 
777  BLI_assert(cd_needed.vcol != 0 &&
778  "No MLOOPCOL layer available in vertpaint, but batches requested anyway!");
779 
780  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
781 }
782 
784 {
785  DRW_MeshCDMask cd_needed;
786  mesh_cd_layers_type_clear(&cd_needed);
787  mesh_cd_calc_active_vcol_layer(me, &cd_needed);
788 
789  BLI_assert(cd_needed.sculpt_vcol != 0 &&
790  "No MPropCol layer available in Sculpt, but batches requested anyway!");
791 
792  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
793 }
794 
796 {
799  return DRW_batch_request(&cache->batch.all_verts);
800 }
801 
803 {
806  return DRW_batch_request(&cache->batch.all_edges);
807 }
808 
810 {
813  return cache->batch.surface;
814 }
815 
817 {
820  if (cache->no_loose_wire) {
821  return NULL;
822  }
823 
824  return DRW_batch_request(&cache->batch.loose_edges);
825 }
826 
828 {
831  return DRW_batch_request(&cache->batch.surface_weights);
832 }
833 
835 {
838  /* Even if is_manifold is not correct (not updated),
839  * the default (not manifold) is just the worst case. */
840  if (r_is_manifold) {
841  *r_is_manifold = cache->is_manifold;
842  }
843  return DRW_batch_request(&cache->batch.edge_detection);
844 }
845 
847 {
850  return DRW_batch_request(&cache->batch.wire_edges);
851 }
852 
854 {
858 }
859 
861  struct GPUMaterial **gpumat_array,
862  uint gpumat_array_len)
863 {
865  DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);
866 
867  BLI_assert(gpumat_array_len == cache->mat_len);
868 
869  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
871  return cache->surface_per_mat;
872 }
873 
875 {
877  texpaint_request_active_uv(cache, me);
879  return cache->surface_per_mat;
880 }
881 
883 {
885  texpaint_request_active_uv(cache, me);
887  return cache->batch.surface;
888 }
889 
891 {
893  texpaint_request_active_vcol(cache, me);
895  return cache->batch.surface;
896 }
897 
899 {
901  sculpt_request_active_vcol(cache, me);
903  return cache->batch.surface;
904 }
905 
907 {
908  return mesh_render_mat_len_get(me);
909 }
910 
912 {
914 
915  cache->cd_needed.sculpt_overlays = 1;
918 
919  return cache->batch.sculpt_overlays;
920 }
921 
924 /* ---------------------------------------------------------------------- */
929 {
931  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
933 
935  return cache->final.vbo.pos_nor;
936 }
937 
940 /* ---------------------------------------------------------------------- */
945 {
948  return DRW_batch_request(&cache->batch.edit_triangles);
949 }
950 
952 {
955  return DRW_batch_request(&cache->batch.edit_edges);
956 }
957 
959 {
962  return DRW_batch_request(&cache->batch.edit_vertices);
963 }
964 
966 {
969  return DRW_batch_request(&cache->batch.edit_vnor);
970 }
971 
973 {
976  return DRW_batch_request(&cache->batch.edit_lnor);
977 }
978 
980 {
983  return DRW_batch_request(&cache->batch.edit_fdots);
984 }
985 
987 {
990  return DRW_batch_request(&cache->batch.edit_skin_roots);
991 }
992 
995 /* ---------------------------------------------------------------------- */
1000 {
1001  MeshBatchCache *cache = mesh_batch_cache_get(me);
1004 }
1005 
1007 {
1008  MeshBatchCache *cache = mesh_batch_cache_get(me);
1011 }
1012 
1014 {
1015  MeshBatchCache *cache = mesh_batch_cache_get(me);
1018 }
1019 
1021 {
1022  MeshBatchCache *cache = mesh_batch_cache_get(me);
1025 }
1026 
1029 /* ---------------------------------------------------------------------- */
1034 {
1035  DRW_MeshCDMask cd_needed;
1036  mesh_cd_layers_type_clear(&cd_needed);
1037  mesh_cd_calc_active_uv_layer(me, &cd_needed);
1038  mesh_cd_calc_edit_uv_layer(me, &cd_needed);
1039 
1040  BLI_assert(cd_needed.edit_uv != 0 &&
1041  "No uv layer available in edituv, but batches requested anyway!");
1042 
1043  mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
1044  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
1045 }
1046 
1047 /* Creates the GPUBatch for drawing the UV Stretching Area Overlay.
1048  * Optional retrieves the total area or total uv area of the mesh.
1049  *
1050  * The `cache->tot_area` and cache->tot_uv_area` update are calculation are
1051  * only valid after calling `DRW_mesh_batch_cache_create_requested`. */
1053  float **tot_area,
1054  float **tot_uv_area)
1055 {
1056  MeshBatchCache *cache = mesh_batch_cache_get(me);
1057  edituv_request_active_uv(cache, me);
1059 
1060  if (tot_area != NULL) {
1061  *tot_area = &cache->tot_area;
1062  }
1063  if (tot_uv_area != NULL) {
1064  *tot_uv_area = &cache->tot_uv_area;
1065  }
1067 }
1068 
1070 {
1071  MeshBatchCache *cache = mesh_batch_cache_get(me);
1072  edituv_request_active_uv(cache, me);
1075 }
1076 
1078 {
1079  MeshBatchCache *cache = mesh_batch_cache_get(me);
1080  edituv_request_active_uv(cache, me);
1082  return DRW_batch_request(&cache->batch.edituv_faces);
1083 }
1084 
1086 {
1087  MeshBatchCache *cache = mesh_batch_cache_get(me);
1088  edituv_request_active_uv(cache, me);
1090  return DRW_batch_request(&cache->batch.edituv_edges);
1091 }
1092 
1094 {
1095  MeshBatchCache *cache = mesh_batch_cache_get(me);
1096  edituv_request_active_uv(cache, me);
1098  return DRW_batch_request(&cache->batch.edituv_verts);
1099 }
1100 
1102 {
1103  MeshBatchCache *cache = mesh_batch_cache_get(me);
1104  edituv_request_active_uv(cache, me);
1106  return DRW_batch_request(&cache->batch.edituv_fdots);
1107 }
1108 
1110 {
1111  MeshBatchCache *cache = mesh_batch_cache_get(me);
1112  edituv_request_active_uv(cache, me);
1114  return DRW_batch_request(&cache->batch.wire_loops_uvs);
1115 }
1116 
1118 {
1119  MeshBatchCache *cache = mesh_batch_cache_get(me);
1120  texpaint_request_active_uv(cache, me);
1122  return DRW_batch_request(&cache->batch.wire_loops);
1123 }
1124 
1127 /* ---------------------------------------------------------------------- */
1131 /* Thread safety need to be assured by caller. Don't call this during drawing.
1132  * Note: For now this only free the shading batches / vbo if any cd layers is
1133  * not needed anymore. */
1135 {
1136  MeshBatchCache *cache = me->runtime.batch_cache;
1137 
1138  if (cache == NULL) {
1139  return;
1140  }
1141 
1142  if (mesh_cd_layers_type_equal(cache->cd_used_over_time, cache->cd_used)) {
1143  cache->lastmatch = ctime;
1144  }
1145 
1146  if (ctime - cache->lastmatch > U.vbotimeout) {
1148  }
1149 
1151 }
1152 
#ifdef DEBUG
/* Sanity check function to test if all requested batches are available.
 * Runs the task graph to completion, then asserts that no batch, VBO or IBO
 * in any of the three buffer caches (final, cage, uv_cage) is still in the
 * "requested" state. Each struct of pointers is scanned as a flat array. */
static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Make sure all requested batches have been setup. */
  /* Note: The next line creates a different scheduling than during release builds what can lead to
   * some issues (See T77867 where we needed to disable this function in order to debug what was
   * happening in release builds). */
  BLI_task_graph_work_and_wait(task_graph);
  /* No batch may still be flagged as requested. */
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
  }
  /* Likewise for every vertex and index buffer of each buffer cache. */
  for (int i = 0; i < sizeof(cache->final.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->final.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.ibo)[i]));
  }
  for (int i = 0; i < sizeof(cache->cage.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->cage.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.ibo)[i]));
  }
  for (int i = 0; i < sizeof(cache->uv_cage.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->uv_cage.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->uv_cage.ibo)[i]));
  }
}
#endif
1186 
1187 /* Can be called for any surface type. Mesh *me is the final mesh. */
1189  Object *ob,
1190  Mesh *me,
1191  const Scene *scene,
1192  const bool is_paint_mode,
1193  const bool use_hide)
1194 {
1195  BLI_assert(task_graph);
1196  const ToolSettings *ts = NULL;
1197  if (scene) {
1198  ts = scene->toolsettings;
1199  }
1200  MeshBatchCache *cache = mesh_batch_cache_get(me);
1201  bool cd_uv_update = false;
1202 
1203  /* Early out */
1204  if (cache->batch_requested == 0) {
1205 #ifdef DEBUG
1206  drw_mesh_batch_cache_check_available(task_graph, me);
1207 #endif
1208  return;
1209  }
1210 
1211  /* Sanity check. */
1212  if ((me->edit_mesh != NULL) && (ob->mode & OB_MODE_EDIT)) {
1214  }
1215 
1216  /* Don't check `DRW_object_is_in_edit_mode(ob)` here because it means the same mesh
1217  * may draw with edit-mesh data and regular mesh data.
1218  * In this case the custom-data layers used wont always match in `me->runtime.batch_cache`.
1219  * If we want to display regular mesh data, we should have a separate cache for the edit-mesh.
1220  * See T77359. */
1221  const bool is_editmode = (me->edit_mesh != NULL) &&
1222  /* In rare cases we have the edit-mode data but not the generated cache.
1223  * This can happen when switching an objects data to a mesh which
1224  * happens to be in edit-mode in another scene, see: T82952. */
1225  (me->edit_mesh->mesh_eval_final !=
1226  NULL) /* && DRW_object_is_in_edit_mode(ob) */;
1227 
1228  /* This could be set for paint mode too, currently it's only used for edit-mode. */
1229  const bool is_mode_active = is_editmode && DRW_object_is_in_edit_mode(ob);
1230 
1231  DRWBatchFlag batch_requested = cache->batch_requested;
1232  cache->batch_requested = 0;
1233 
1234  if (batch_requested & MBC_SURFACE_WEIGHTS) {
1235  /* Check vertex weights. */
1236  if ((cache->batch.surface_weights != NULL) && (ts != NULL)) {
1237  struct DRW_MeshWeightState wstate;
1238  BLI_assert(ob->type == OB_MESH);
1239  drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
1240  mesh_batch_cache_check_vertex_group(cache, &wstate);
1241  drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
1242  drw_mesh_weight_state_clear(&wstate);
1243  }
1244  }
1245 
1246  if (batch_requested &
1249  /* Modifiers will only generate an orco layer if the mesh is deformed. */
1250  if (cache->cd_needed.orco != 0) {
1251  /* Orco is always extracted from final mesh. */
1252  Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
1253  if (CustomData_get_layer(&me_final->vdata, CD_ORCO) == NULL) {
1254  /* Skip orco calculation */
1255  cache->cd_needed.orco = 0;
1256  }
1257  }
1258 
1259  /* Verify that all surface batches have needed attribute layers.
1260  */
1261  /* TODO(fclem): We could be a bit smarter here and only do it per
1262  * material. */
1263  bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
1264  if (cd_overlap == false) {
1265  FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
1266  if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
1267  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv);
1268  cd_uv_update = true;
1269  }
1270  if ((cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
1271  cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
1272  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.tan);
1273  }
1274  if (cache->cd_used.orco != cache->cd_needed.orco) {
1275  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.orco);
1276  }
1277  if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
1278  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.sculpt_data);
1279  }
1280  if (((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) ||
1281  ((cache->cd_used.sculpt_vcol & cache->cd_needed.sculpt_vcol) !=
1282  cache->cd_needed.sculpt_vcol)) {
1283  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.vcol);
1284  }
1285  }
1286  /* We can't discard batches at this point as they have been
1287  * referenced for drawing. Just clear them in place. */
1288  for (int i = 0; i < cache->mat_len; i++) {
1290  }
1292  cache->batch_ready &= ~(MBC_SURFACE);
1293 
1294  mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
1295  }
1298  }
1299 
1300  if (batch_requested & MBC_EDITUV) {
1301  /* Discard UV batches if sync_selection changes */
1302  const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1303  if (cd_uv_update || (cache->is_uvsyncsel != is_uvsyncsel)) {
1304  cache->is_uvsyncsel = is_uvsyncsel;
1305  FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
1306  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.edituv_data);
1307  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_uv);
1308  GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_edituv_data);
1309  GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_tris);
1310  GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_lines);
1311  GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_points);
1312  GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_fdots);
1313  }
1314  /* We only clear the batches as they may already have been
1315  * referenced. */
1323  cache->batch_ready &= ~MBC_EDITUV;
1324  }
1325  }
1326 
1327  /* Second chance to early out */
1328  if ((batch_requested & ~cache->batch_ready) == 0) {
1329 #ifdef DEBUG
1330  drw_mesh_batch_cache_check_available(task_graph, me);
1331 #endif
1332  return;
1333  }
1334 
1335  /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-PBVH).
1336  * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1337  * Normal updates should be part of the brush loop and only run during the stroke when the
1338  * brush needs to sample the surface. The drawing code should only update the normals
1339  * per redraw when smooth shading is enabled. */
1340  const bool do_update_sculpt_normals = ob->sculpt && ob->sculpt->pbvh;
1341  if (do_update_sculpt_normals) {
1342  Mesh *mesh = ob->data;
1344  }
1345 
1346  cache->batch_ready |= batch_requested;
1347 
1348  const bool do_cage = (is_editmode &&
1350 
1351  const bool do_uvcage = is_editmode && !me->edit_mesh->mesh_eval_final->runtime.is_original;
1352 
1353  MeshBufferCache *mbufcache = &cache->final;
1354 
1355  /* Initialize batches and request VBO's & IBO's. */
1357  DRW_ibo_request(cache->batch.surface, &mbufcache->ibo.tris);
1358  /* Order matters. First ones override latest VBO's attributes. */
1359  DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.lnor);
1360  DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.pos_nor);
1361  if (cache->cd_used.uv != 0) {
1362  DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.uv);
1363  }
1364  if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
1365  DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.vcol);
1366  }
1367  }
1369  DRW_vbo_request(cache->batch.all_verts, &mbufcache->vbo.pos_nor);
1370  }
1372  DRW_ibo_request(cache->batch.sculpt_overlays, &mbufcache->ibo.tris);
1373  DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.pos_nor);
1374  DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.sculpt_data);
1375  }
1377  DRW_ibo_request(cache->batch.all_edges, &mbufcache->ibo.lines);
1378  DRW_vbo_request(cache->batch.all_edges, &mbufcache->vbo.pos_nor);
1379  }
1381  DRW_ibo_request(NULL, &mbufcache->ibo.lines);
1382  DRW_ibo_request(cache->batch.loose_edges, &mbufcache->ibo.lines_loose);
1383  DRW_vbo_request(cache->batch.loose_edges, &mbufcache->vbo.pos_nor);
1384  }
1387  DRW_vbo_request(cache->batch.edge_detection, &mbufcache->vbo.pos_nor);
1388  }
1390  DRW_ibo_request(cache->batch.surface_weights, &mbufcache->ibo.tris);
1391  DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.pos_nor);
1392  DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.weights);
1393  }
1395  DRW_ibo_request(cache->batch.wire_loops, &mbufcache->ibo.lines_paint_mask);
1396  /* Order matters. First ones override latest VBO's attributes. */
1397  DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.lnor);
1398  DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.pos_nor);
1399  }
1401  DRW_ibo_request(cache->batch.wire_edges, &mbufcache->ibo.lines);
1402  DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.pos_nor);
1403  DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.edge_fac);
1404  }
1406  DRW_ibo_request(cache->batch.wire_loops_uvs, &mbufcache->ibo.edituv_lines);
1407  /* For paint overlay. Active layer should have been queried. */
1408  if (cache->cd_used.uv != 0) {
1409  DRW_vbo_request(cache->batch.wire_loops_uvs, &mbufcache->vbo.uv);
1410  }
1411  }
1413  DRW_ibo_request(cache->batch.edit_mesh_analysis, &mbufcache->ibo.tris);
1414  DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbufcache->vbo.pos_nor);
1416  }
1417 
1418  /* Per Material */
1419  for (int i = 0; i < cache->mat_len; i++) {
1421  DRW_ibo_request(cache->surface_per_mat[i], &mbufcache->tris_per_mat[i]);
1422  /* Order matters. First ones override latest VBO's attributes. */
1423  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.lnor);
1424  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.pos_nor);
1425  if (cache->cd_used.uv != 0) {
1426  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.uv);
1427  }
1428  if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
1429  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.tan);
1430  }
1431  if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
1432  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.vcol);
1433  }
1434  if (cache->cd_used.orco != 0) {
1435  DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.orco);
1436  }
1437  }
1438  }
1439 
1440  mbufcache = (do_cage) ? &cache->cage : &cache->final;
1441 
1442  /* Edit Mesh */
1444  DRW_ibo_request(cache->batch.edit_triangles, &mbufcache->ibo.tris);
1445  DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.pos_nor);
1446  DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.edit_data);
1447  }
1449  DRW_ibo_request(cache->batch.edit_vertices, &mbufcache->ibo.points);
1450  DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.pos_nor);
1451  DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.edit_data);
1452  }
1454  DRW_ibo_request(cache->batch.edit_edges, &mbufcache->ibo.lines);
1455  DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.pos_nor);
1456  DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.edit_data);
1457  }
1459  DRW_ibo_request(cache->batch.edit_vnor, &mbufcache->ibo.points);
1460  DRW_vbo_request(cache->batch.edit_vnor, &mbufcache->vbo.pos_nor);
1461  }
1463  DRW_ibo_request(cache->batch.edit_lnor, &mbufcache->ibo.tris);
1464  DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.pos_nor);
1465  DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.lnor);
1466  }
1468  DRW_ibo_request(cache->batch.edit_fdots, &mbufcache->ibo.fdots);
1469  DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_pos);
1470  DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_nor);
1471  }
1473  DRW_vbo_request(cache->batch.edit_skin_roots, &mbufcache->vbo.skin_roots);
1474  }
1475 
1476  /* Selection */
1478  DRW_ibo_request(cache->batch.edit_selection_verts, &mbufcache->ibo.points);
1479  DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.pos_nor);
1480  DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.vert_idx);
1481  }
1483  DRW_ibo_request(cache->batch.edit_selection_edges, &mbufcache->ibo.lines);
1484  DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.pos_nor);
1485  DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.edge_idx);
1486  }
1488  DRW_ibo_request(cache->batch.edit_selection_faces, &mbufcache->ibo.tris);
1489  DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.pos_nor);
1490  DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.poly_idx);
1491  }
1493  DRW_ibo_request(cache->batch.edit_selection_fdots, &mbufcache->ibo.fdots);
1495  DRW_vbo_request(cache->batch.edit_selection_fdots, &mbufcache->vbo.fdot_idx);
1496  }
1497 
1503  mbufcache = (do_uvcage) ? &cache->uv_cage : &cache->final;
1504 
1505  /* Edit UV */
1507  DRW_ibo_request(cache->batch.edituv_faces, &mbufcache->ibo.edituv_tris);
1508  DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.uv);
1509  DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.edituv_data);
1510  }
1513  DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.uv);
1516  }
1519  DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.uv);
1522  }
1524  DRW_ibo_request(cache->batch.edituv_edges, &mbufcache->ibo.edituv_lines);
1525  DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.uv);
1526  DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.edituv_data);
1527  }
1529  DRW_ibo_request(cache->batch.edituv_verts, &mbufcache->ibo.edituv_points);
1530  DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.uv);
1531  DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.edituv_data);
1532  }
1534  DRW_ibo_request(cache->batch.edituv_fdots, &mbufcache->ibo.edituv_fdots);
1535  DRW_vbo_request(cache->batch.edituv_fdots, &mbufcache->vbo.fdots_uv);
1537  }
1538 
1539  /* Meh loose Scene const correctness here. */
1540  const bool use_subsurf_fdots = scene ? BKE_modifiers_uses_subsurf_facedots(scene, ob) : false;
1541 
1542  if (do_uvcage) {
1544  cache,
1545  cache->uv_cage,
1546  me,
1547  is_editmode,
1548  is_paint_mode,
1549  is_mode_active,
1550  ob->obmat,
1551  false,
1552  true,
1553  false,
1554  &cache->cd_used,
1555  scene,
1556  ts,
1557  true);
1558  }
1559 
1560  if (do_cage) {
1562  cache,
1563  cache->cage,
1564  me,
1565  is_editmode,
1566  is_paint_mode,
1567  is_mode_active,
1568  ob->obmat,
1569  false,
1570  false,
1571  use_subsurf_fdots,
1572  &cache->cd_used,
1573  scene,
1574  ts,
1575  true);
1576  }
1577 
1579  cache,
1580  cache->final,
1581  me,
1582  is_editmode,
1583  is_paint_mode,
1584  is_mode_active,
1585  ob->obmat,
1586  true,
1587  false,
1588  use_subsurf_fdots,
1589  &cache->cd_used,
1590  scene,
1591  ts,
1592  use_hide);
1593 
1594  /* Ensure that all requested batches have finished.
1595  * Ideally we want to remove this sync, but there are cases where this doesn't work.
1596  * See T79038 for example.
1597  *
1598  * An idea to improve this is to separate the Object mode from the edit mode draw caches. And
1599  * based on the mode the correct one will be updated. Other option is to look into using
1600  * drw_batch_cache_generate_requested_delayed. */
1601  BLI_task_graph_work_and_wait(task_graph);
1602 #ifdef DEBUG
1603  drw_mesh_batch_cache_check_available(task_graph, me);
1604 #endif
1605 }
1606 
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_active_layer(const struct CustomData *data, int type)
int CustomData_get_stencil_layer(const struct CustomData *data, int type)
void * CustomData_get_layer(const struct CustomData *data, int type)
int CustomData_get_named_layer(const struct CustomData *data, int type, const char *name)
Definition: customdata.c:2365
int CustomData_get_render_layer(const struct CustomData *data, int type)
support for deformation groups and hooks.
eMeshBatchDirtyMode
@ BKE_MESH_BATCH_DIRTY_UVEDIT_ALL
@ BKE_MESH_BATCH_DIRTY_SELECT_PAINT
@ BKE_MESH_BATCH_DIRTY_SHADING
@ BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT
@ BKE_MESH_BATCH_DIRTY_ALL
@ BKE_MESH_BATCH_DIRTY_SELECT
bool BKE_modifiers_uses_subsurf_facedots(const struct Scene *scene, struct Object *ob)
Functions for dealing with objects and deform verts, used by painting and tools.
bool BKE_object_defgroup_check_lock_relative(const bool *lock_flags, const bool *validmap, int index)
void BKE_object_defgroup_split_locked_validmap(int defbase_tot, const bool *locked, const bool *deform, bool *r_locked, bool *r_unlocked)
bool * BKE_object_defgroup_lock_flags_get(struct Object *ob, const int defbase_tot)
void BKE_object_defgroup_mirror_selection(struct Object *ob, int defbase_tot, const bool *selection, bool *dg_flags_sel, int *r_dg_flags_sel_tot)
bool BKE_object_defgroup_check_lock_relative_multi(int defbase_tot, const bool *lock_flags, const bool *selected, int sel_tot)
bool * BKE_object_defgroup_validmap_get(struct Object *ob, const int defbase_tot)
bool * BKE_object_defgroup_selected_get(struct Object *ob, int defbase_tot, int *r_dg_flags_sel_tot)
A BVH for high poly meshes.
void BKE_pbvh_update_normals(PBVH *pbvh, struct SubdivCCG *subdiv_ccg)
Definition: pbvh.c:2650
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
unsigned int uint
Definition: BLI_sys_types.h:83
void BLI_task_graph_work_and_wait(struct TaskGraph *task_graph)
Definition: task_graph.cc:122
#define UNUSED(x)
@ CD_PROP_COLOR
@ CD_MLOOPCOL
@ CD_MLOOPUV
@ CD_AUTO_FROM_NAME
@ CD_TANGENT
eMeshWrapperType
@ ME_WRAPPER_TYPE_MDATA
@ ME_WRAPPER_TYPE_BMESH
#define ME_USING_MIRROR_X_VERTEX_GROUPS(_me)
@ OB_MODE_EDIT
Object is a sort of wrapper for general info.
@ OB_MESH
#define UV_SYNC_SELECTION
GPUBatch
Definition: GPU_batch.h:93
#define GPU_BATCH_CLEAR_SAFE(batch)
Definition: GPU_batch.h:207
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition: GPU_batch.h:199
struct GPUIndexBuf GPUIndexBuf
#define GPU_INDEXBUF_DISCARD_SAFE(elem)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
ListBase GPU_material_attributes(GPUMaterial *material)
Definition: gpu_material.c:572
@ GPU_PRIM_LINES
Definition: GPU_primitive.h:36
@ GPU_PRIM_POINTS
Definition: GPU_primitive.h:35
@ GPU_PRIM_LINES_ADJ
Definition: GPU_primitive.h:43
@ GPU_PRIM_TRIS
Definition: GPU_primitive.h:37
struct GPUVertBuf GPUVertBuf
#define GPU_VERTBUF_DISCARD_SAFE(verts)
Read Guarded memory(de)allocation.
#define MEM_SAFE_FREE(v)
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
unsigned int U
Definition: btGjkEpa3.h:78
Scene scene
#define FOREACH_MESH_BUFFER_CACHE(batch_cache, mbc)
@ DRW_MESH_WEIGHT_STATE_MULTIPAINT
@ DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE
@ DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE
BLI_INLINE int mesh_render_mat_len_get(Mesh *me)
#define MBC_EDITUV
DRWBatchFlag
@ MBC_EDIT_VNOR
@ MBC_EDITUV_FACES_STRETCH_AREA
@ MBC_EDIT_EDGES
@ MBC_LOOSE_EDGES
@ MBC_EDITUV_EDGES
@ MBC_EDITUV_FACEDOTS
@ MBC_SCULPT_OVERLAYS
@ MBC_EDIT_SELECTION_EDGES
@ MBC_EDGE_DETECTION
@ MBC_WIRE_LOOPS_UVS
@ MBC_EDIT_LNOR
@ MBC_ALL_EDGES
@ MBC_EDITUV_FACES
@ MBC_EDITUV_FACES_STRETCH_ANGLE
@ MBC_EDIT_VERTICES
@ MBC_EDIT_MESH_ANALYSIS
@ MBC_EDIT_SELECTION_FACEDOTS
@ MBC_EDIT_FACEDOTS
@ MBC_SURFACE_WEIGHTS
@ MBC_WIRE_LOOPS
@ MBC_EDITUV_VERTS
@ MBC_SKIN_ROOTS
@ MBC_SURFACE
@ MBC_ALL_VERTS
@ MBC_EDIT_SELECTION_VERTS
@ MBC_EDIT_TRIANGLES
@ MBC_WIRE_EDGES
@ MBC_EDIT_SELECTION_FACES
void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph, MeshBatchCache *cache, MeshBufferCache mbc, Mesh *me, const bool is_editmode, const bool is_paint_mode, const bool is_mode_active, const float obmat[4][4], const bool do_final, const bool do_uvedit, const bool use_subsurf_fdots, const DRW_MeshCDMask *cd_layer_used, const Scene *scene, const ToolSettings *ts, const bool use_hide)
static void texpaint_request_active_uv(MeshBatchCache *cache, Mesh *me)
GPUVertBuf * DRW_mesh_batch_cache_pos_vertbuf_get(Mesh *me)
static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
GPUBatch * DRW_mesh_batch_cache_get_edit_facedots(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache *cache, DRWBatchFlag new_flag)
static void sculpt_request_active_vcol(MeshBatchCache *cache, Mesh *me)
static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
GPUBatch * DRW_mesh_batch_cache_get_edituv_faces(Mesh *me)
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
GPUBatch * DRW_mesh_batch_cache_get_edit_vnors(Mesh *me)
static void mesh_batch_cache_init(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_uv_edges(Mesh *me)
static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
GPUBatch * DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh *me)
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
void DRW_mesh_batch_cache_free(Mesh *me)
static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
GPUBatch * DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
GPUBatch ** DRW_mesh_batch_cache_get_surface_shaded(Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
BLI_INLINE const CustomData * mesh_cd_vdata_get_from_mesh(const Mesh *me)
static void edituv_request_active_uv(MeshBatchCache *cache, Mesh *me)
static MeshBatchCache * mesh_batch_cache_get(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
BLI_INLINE const Mesh * editmesh_final_or_this(const Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edit_vertices(Mesh *me)
static void mesh_batch_cache_clear(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_surface_weights(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edituv_edges(Mesh *me)
static bool drw_mesh_weight_state_compare(const struct DRW_MeshWeightState *a, const struct DRW_MeshWeightState *b)
static void drw_mesh_weight_state_clear(struct DRW_MeshWeightState *wstate)
GPUBatch * DRW_mesh_batch_cache_get_surface(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edges_with_select_id(Mesh *me)
static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
GPUBatch * DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
static bool mesh_batch_cache_valid(Mesh *me)
GPUBatch ** DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edit_edges(Mesh *me)
static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
GPUBatch * DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edge_detection(Mesh *me, bool *r_is_manifold)
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
GPUBatch * DRW_mesh_batch_cache_get_all_edges(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edituv_facedots(Mesh *me)
int DRW_mesh_material_count_get(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edit_skin_roots(Mesh *me)
static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
static void mesh_cd_calc_active_mloopcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache, const struct DRW_MeshWeightState *wstate)
GPUBatch * DRW_mesh_batch_cache_get_edituv_verts(Mesh *me)
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
GPUBatch * DRW_mesh_batch_cache_get_edit_triangles(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst, const struct DRW_MeshWeightState *wstate_src)
void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
GPUBatch * DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Mesh *me, float **tot_area, float **tot_uv_area)
GPUBatch * DRW_mesh_batch_cache_get_edit_lnors(Mesh *me)
void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph, Object *ob, Mesh *me, const Scene *scene, const bool is_paint_mode, const bool use_hide)
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
GPUBatch * DRW_mesh_batch_cache_get_all_verts(Mesh *me)
void DRW_mesh_batch_cache_validate(Mesh *me)
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me, struct GPUMaterial **gpumat_array, int gpumat_array_len)
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
GPUBatch * DRW_mesh_batch_cache_get_surface_sculpt(Mesh *me)
static void drw_mesh_weight_state_extract(Object *ob, Mesh *me, const ToolSettings *ts, bool paint_mode, struct DRW_MeshWeightState *wstate)
GPUBatch * DRW_mesh_batch_cache_get_wireframes_face(Mesh *me)
BLI_INLINE const CustomData * mesh_cd_ldata_get_from_mesh(const Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Mesh *me)
GPUBatch * DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh *me)
BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
BLI_INLINE bool DRW_vbo_requested(GPUVertBuf *vbo)
BLI_INLINE bool DRW_batch_requested(GPUBatch *batch, int prim_type)
BLI_INLINE void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
BLI_INLINE bool DRW_ibo_requested(GPUIndexBuf *ibo)
BLI_INLINE GPUBatch * DRW_batch_request(GPUBatch **batch)
bool DRW_object_is_in_edit_mode(const Object *ob)
Definition: draw_manager.c:204
GPUBatch * batch
Definition: drawnode.c:3779
void *(* MEM_dupallocN)(const void *vmemh)
Definition: mallocn.c:42
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
static unsigned a[3]
Definition: RandGen.cpp:92
unsigned int uint32_t
Definition: stdint.h:83
unsigned __int64 uint64_t
Definition: stdint.h:93
struct Mesh * mesh_eval_final
Definition: BKE_editmesh.h:63
struct Mesh * mesh_eval_cage
Definition: BKE_editmesh.h:63
struct BMesh * bm
Definition: BKE_editmesh.h:52
CustomData vdata
Definition: bmesh_class.h:337
CustomData ldata
Definition: bmesh_class.h:337
uint32_t sculpt_overlays
DRW_MeshCDMask cd_used
GPUBatch * edit_selection_faces
GPUBatch * edituv_edges
DRW_MeshCDMask cd_needed
DRW_MeshCDMask cd_used_over_time
GPUBatch * edit_selection_edges
GPUBatch ** surface_per_mat
GPUBatch * sculpt_overlays
MeshBufferCache uv_cage
GPUBatch * edituv_verts
MeshBufferCache cage
DRWBatchFlag batch_requested
GPUBatch * edit_skin_roots
GPUBatch * edituv_faces_stretch_angle
GPUBatch * wire_loops_uvs
GPUBatch * edit_vertices
GPUBatch * edit_selection_verts
GPUBatch * loose_edges
GPUBatch * edit_mesh_analysis
GPUBatch * edituv_faces_stretch_area
GPUBatch * edge_detection
DRWBatchFlag batch_ready
struct DRW_MeshWeightState weight_state
GPUBatch * edituv_faces
MeshBufferCache final
GPUBatch * surface_weights
GPUBatch * edit_triangles
GPUBatch * edituv_fdots
GPUBatch * edit_selection_fdots
struct MeshBatchCache::@262 batch
GPUIndexBuf * edituv_tris
GPUVertBuf * fdots_uv
GPUIndexBuf * edituv_fdots
GPUVertBuf * edituv_data
GPUVertBuf * edge_fac
GPUVertBuf * edge_idx
GPUVertBuf * fdots_pos
GPUIndexBuf * lines_adjacency
GPUIndexBuf * lines_paint_mask
struct MeshBufferCache::@261 ibo
GPUIndexBuf * edituv_lines
GPUVertBuf * poly_idx
GPUVertBuf * edituv_stretch_area
GPUVertBuf * fdot_idx
GPUVertBuf * edituv_stretch_angle
GPUVertBuf * fdots_nor
GPUIndexBuf * tris
GPUIndexBuf ** tris_per_mat
GPUVertBuf * weights
GPUIndexBuf * lines
GPUVertBuf * mesh_analysis
GPUVertBuf * skin_roots
GPUIndexBuf * points
struct MeshBufferCache::@260 vbo
GPUIndexBuf * lines_loose
GPUIndexBuf * fdots
GPUVertBuf * vert_idx
GPUVertBuf * sculpt_data
GPUVertBuf * fdots_edituv_data
GPUVertBuf * edit_data
GPUIndexBuf * edituv_points
GPUVertBuf * pos_nor
void * batch_cache
struct SubdivCCG * subdiv_ccg
struct BMEditMesh * edit_mesh
struct CustomData pdata ldata
Mesh_Runtime runtime
ListBase defbase
unsigned short actdef
float obmat[4][4]
struct SculptSession * sculpt
void * data
struct ToolSettings * toolsettings
struct PBVH * pbvh
Definition: BKE_paint.h:504