Blender V4.5
draw_cache_impl_mesh.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2017 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include <array>
12#include <optional>
13
14#include "MEM_guardedalloc.h"
15
16#include "BLI_index_range.hh"
17#include "BLI_listbase.h"
18#include "BLI_span.hh"
19#include "BLI_string_ref.hh"
20
21#include "DNA_mesh_types.h"
22#include "DNA_object_types.h"
23#include "DNA_scene_types.h"
24#include "DNA_userdef_types.h"
25
26#include "BKE_attribute.hh"
27#include "BKE_customdata.hh"
28#include "BKE_editmesh.hh"
29#include "BKE_material.hh"
30#include "BKE_mesh.hh"
31#include "BKE_object.hh"
32#include "BKE_object_deform.h"
33#include "BKE_paint.hh"
34#include "BKE_paint_bvh.hh"
36
37#include "atomic_ops.h"
38
39#include "GPU_batch.hh"
40#include "GPU_material.hh"
41
42#include "DRW_render.hh"
43
44#include "draw_cache_extract.hh"
45#include "draw_cache_inline.hh"
46#include "draw_subdivision.hh"
47
48#include "draw_cache_impl.hh" /* own include */
50
52
53namespace blender::draw {
54
55/* ---------------------------------------------------------------------- */
58
59#define TRIS_PER_MAT_INDEX BUFFER_LEN
60
61static void mesh_batch_cache_clear(MeshBatchCache &cache);
62
64 const Span<VBOType> vbos,
65 const Span<IBOType> ibos)
66{
67 Set<const void *, 16> buffer_ptrs;
68 buffer_ptrs.reserve(vbos.size() + ibos.size());
69 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
70 for (const VBOType vbo : vbos) {
71 if (const auto *buffer = mbc->buff.vbos.lookup_ptr(vbo)) {
72 buffer_ptrs.add(buffer->get());
73 }
74 }
75 }
76 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
77 for (const IBOType ibo : ibos) {
78 if (const auto *buffer = mbc->buff.ibos.lookup_ptr(ibo)) {
79 buffer_ptrs.add(buffer->get());
80 }
81 }
82 }
83
84 const auto batch_contains_data = [&](gpu::Batch &batch) {
85 if (buffer_ptrs.contains(batch.elem)) {
86 return true;
87 }
88 if (std::any_of(batch.verts, batch.verts + ARRAY_SIZE(batch.verts), [&](gpu::VertBuf *vbo) {
89 return vbo && buffer_ptrs.contains(vbo);
90 }))
91 {
92 return true;
93 }
94 return false;
95 };
96
97 for (const int i : IndexRange(MBC_BATCH_LEN)) {
98 gpu::Batch *batch = ((gpu::Batch **)&cache.batch)[i];
99 if (batch && batch_contains_data(*batch)) {
100 GPU_BATCH_DISCARD_SAFE(((gpu::Batch **)&cache.batch)[i]);
101 cache.batch_ready &= ~DRWBatchFlag(1u << i);
102 }
103 }
104
105 if (!cache.surface_per_mat.is_empty()) {
106 if (cache.surface_per_mat.first() && batch_contains_data(*cache.surface_per_mat.first())) {
107 /* The format for all `surface_per_mat` batches is the same, discard them all. */
108 for (const int i : cache.surface_per_mat.index_range()) {
110 }
112 }
113 }
114
115 for (const VBOType vbo : vbos) {
116 cache.final.buff.vbos.remove(vbo);
117 cache.cage.buff.vbos.remove(vbo);
118 cache.uv_cage.buff.vbos.remove(vbo);
119 }
120 for (const IBOType ibo : ibos) {
121 cache.final.buff.ibos.remove(ibo);
122 cache.cage.buff.ibos.remove(ibo);
123 cache.uv_cage.buff.ibos.remove(ibo);
124 }
125}
126
/* Return true if all layers in _b_ are inside _a_. */
129{
130 return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
131}
132
134{
135 return *((uint32_t *)&a) == *((uint32_t *)&b);
136}
137
139{
140 uint32_t *a_p = (uint32_t *)a;
141 uint32_t *b_p = (uint32_t *)&b;
143}
144
146{
147 *((uint32_t *)a) = 0;
148}
149
/* Mark the edit-mode UV data as required in the custom-data usage mask.
 * The mesh argument is unused; it is kept so the signature matches the other
 * `mesh_cd_calc_*` helpers in this file. */
static void mesh_cd_calc_edit_uv_layer(const Mesh & /*mesh*/, DRW_MeshCDMask *cd_used)
{
  cd_used->edit_uv = 1;
}
154
155static void mesh_cd_calc_active_uv_layer(const Object &object,
156 const Mesh &mesh,
157 DRW_MeshCDMask &cd_used)
158{
159 const Mesh &me_final = editmesh_final_or_this(object, mesh);
160 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
161 int layer = CustomData_get_active_layer(&cd_ldata, CD_PROP_FLOAT2);
162 if (layer != -1) {
163 cd_used.uv |= (1 << layer);
164 }
165}
166
168 const Mesh &mesh,
169 DRW_MeshCDMask &cd_used)
170{
171 const Mesh &me_final = editmesh_final_or_this(object, mesh);
172 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
173 int layer = CustomData_get_stencil_layer(&cd_ldata, CD_PROP_FLOAT2);
174 if (layer != -1) {
175 cd_used.uv |= (1 << layer);
176 }
177}
178
180 const Mesh &mesh,
181 const Span<const GPUMaterial *> materials,
182 VectorSet<std::string> *attributes)
183{
184 const Mesh &me_final = editmesh_final_or_this(object, mesh);
185 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
186 const CustomData &cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
187 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
188 const CustomData &cd_edata = mesh_cd_edata_get_from_mesh(me_final);
189
190 /* See: DM_vertex_attributes_from_gpu for similar logic */
191 DRW_MeshCDMask cd_used;
193
194 const StringRefNull default_color_name = me_final.default_color_attribute ?
195 me_final.default_color_attribute :
196 "";
197
198 for (const GPUMaterial *gpumat : materials) {
199 if (gpumat == nullptr) {
200 continue;
201 }
202 ListBase gpu_attrs = GPU_material_attributes(gpumat);
203 LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
204 StringRef name = gpu_attr->name;
205 eCustomDataType type = eCustomDataType(gpu_attr->type);
206 int layer = -1;
207 std::optional<bke::AttrDomain> domain;
208
209 if (gpu_attr->is_default_color) {
210 name = default_color_name.c_str();
211 }
212
213 if (type == CD_AUTO_FROM_NAME) {
214 /* We need to deduce what exact layer is used.
215 *
216 * We do it based on the specified name.
217 */
218 if (!name.is_empty()) {
219 layer = CustomData_get_named_layer(&cd_ldata, CD_PROP_FLOAT2, name);
220 type = CD_MTFACE;
221
222 if (layer == -1) {
223 /* Try to match a generic attribute, we use the first attribute domain with a
224 * matching name. */
225 if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
226 domain = bke::AttrDomain::Point;
227 }
228 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
230 }
231 else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
232 domain = bke::AttrDomain::Face;
233 }
234 else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
235 domain = bke::AttrDomain::Edge;
236 }
237 else {
238 layer = -1;
239 }
240 }
241
242 if (layer == -1) {
243 continue;
244 }
245 }
246 else {
247 /* Fall back to the UV layer, which matches old behavior. */
248 type = CD_MTFACE;
249 }
250 }
251
252 switch (type) {
253 case CD_MTFACE: {
254 if (layer == -1) {
255 layer = !name.is_empty() ?
258 }
259 if (layer != -1 && !CustomData_layer_is_anonymous(&cd_ldata, CD_PROP_FLOAT2, layer)) {
260 cd_used.uv |= (1 << layer);
261 }
262 break;
263 }
264 case CD_TANGENT: {
265 if (layer == -1) {
266 layer = !name.is_empty() ?
269
270 /* Only fall back to orco (below) when we have no UV layers, see: #56545 */
271 if (layer == -1 && !name.is_empty()) {
273 }
274 }
275 if (layer != -1) {
276 cd_used.tan |= (1 << layer);
277 }
278 else {
279 /* no UV layers at all => requesting orco */
280 cd_used.tan_orco = 1;
281 cd_used.orco = 1;
282 }
283 break;
284 }
285
286 case CD_ORCO: {
287 cd_used.orco = 1;
288 break;
289 }
291 case CD_PROP_COLOR:
293 case CD_PROP_FLOAT3:
294 case CD_PROP_BOOL:
295 case CD_PROP_INT8:
296 case CD_PROP_INT32:
297 case CD_PROP_INT16_2D:
298 case CD_PROP_INT32_2D:
299 case CD_PROP_FLOAT:
300 case CD_PROP_FLOAT2: {
301 if (layer != -1 && domain.has_value()) {
302 drw_attributes_add_request(attributes, name);
303 }
304 break;
305 }
306 default:
307 break;
308 }
309 }
310 }
311 return cd_used;
312}
313
315
316/* ---------------------------------------------------------------------- */
319
322{
326
327 memset(wstate, 0, sizeof(*wstate));
328
329 wstate->defgroup_active = -1;
330}
331
334 const DRW_MeshWeightState *wstate_src)
335{
336 MEM_SAFE_FREE(wstate_dst->defgroup_sel);
337 MEM_SAFE_FREE(wstate_dst->defgroup_locked);
338 MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);
339
340 memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
341
342 if (wstate_src->defgroup_sel) {
343 wstate_dst->defgroup_sel = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_sel));
344 }
345 if (wstate_src->defgroup_locked) {
346 wstate_dst->defgroup_locked = static_cast<bool *>(MEM_dupallocN(wstate_src->defgroup_locked));
347 }
348 if (wstate_src->defgroup_unlocked) {
349 wstate_dst->defgroup_unlocked = static_cast<bool *>(
350 MEM_dupallocN(wstate_src->defgroup_unlocked));
351 }
352}
353
/* Compare two boolean flag arrays of `size` elements.
 * Two null arrays compare equal; a null and a non-null array do not. */
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  if (array1 == nullptr && array2 == nullptr) {
    /* Both unset: treated as matching. */
    return true;
  }
  if (array1 == nullptr || array2 == nullptr) {
    /* Only one side has data. */
    return false;
  }
  return memcmp(array1, array2, size * sizeof(bool)) == 0;
}
359
362 const DRW_MeshWeightState *b)
363{
364 return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
365 a->flags == b->flags && a->alert_mode == b->alert_mode &&
366 a->defgroup_sel_count == b->defgroup_sel_count &&
367 drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
368 drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
369 drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
370}
371
373 Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
374{
375 /* Extract complete vertex weight group selection state and mode flags. */
376 memset(wstate, 0, sizeof(*wstate));
377
380
381 wstate->alert_mode = ts.weightuser;
382
383 if (paint_mode && ts.multipaint) {
384 /* Multi-paint needs to know all selected bones, not just the active group.
385 * This is actually a relatively expensive operation, but caching would be difficult. */
387 &ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
388
389 if (wstate->defgroup_sel_count > 1) {
392
395 wstate->defgroup_len,
396 wstate->defgroup_sel,
397 wstate->defgroup_sel,
398 &wstate->defgroup_sel_count);
399 }
400 }
401 /* With only one selected bone Multi-paint reverts to regular mode. */
402 else {
403 wstate->defgroup_sel_count = 0;
405 }
406 }
407
408 if (paint_mode && ts.wpaint_lock_relative) {
409 /* Set of locked vertex groups for the lock relative mode. */
412
413 /* Check that a deform group is active, and none of selected groups are locked. */
415 wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
417 wstate->defgroup_locked,
418 wstate->defgroup_sel,
419 wstate->defgroup_sel_count))
420 {
422
423 /* Compute the set of locked and unlocked deform vertex groups. */
425 wstate->defgroup_locked,
426 wstate->defgroup_unlocked,
427 wstate->defgroup_locked, /* out */
428 wstate->defgroup_unlocked);
429 }
430 else {
433 }
434 }
435}
436
438
439/* ---------------------------------------------------------------------- */
442
444{
445 atomic_fetch_and_or_uint32((uint32_t *)(&cache.batch_requested), *(uint32_t *)&new_flag);
446}
447
448/* gpu::Batch cache management. */
449
450static bool mesh_batch_cache_valid(Mesh &mesh)
451{
452 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
453
454 if (cache == nullptr) {
455 return false;
456 }
457
458 /* NOTE: bke::pbvh::Tree draw data should not be checked here. */
459
460 if (cache->is_editmode != (mesh.runtime->edit_mesh != nullptr)) {
461 return false;
462 }
463
464 if (cache->is_dirty) {
465 return false;
466 }
467
469 return false;
470 }
471
472 return true;
473}
474
475static void mesh_batch_cache_init(Mesh &mesh)
476{
477 if (!mesh.runtime->batch_cache) {
478 mesh.runtime->batch_cache = MEM_new<MeshBatchCache>(__func__);
479 }
480 else {
481 *static_cast<MeshBatchCache *>(mesh.runtime->batch_cache) = {};
482 }
483 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
484
485 cache->is_editmode = mesh.runtime->edit_mesh != nullptr;
486
487 if (cache->is_editmode == false) {
488 // cache->edge_len = mesh_render_edges_len_get(mesh);
489 // cache->tri_len = mesh_render_corner_tris_len_get(mesh);
490 // cache->face_len = mesh_render_faces_len_get(mesh);
491 // cache->vert_len = mesh_render_verts_len_get(mesh);
492 }
493
495 cache->surface_per_mat = Array<gpu::Batch *>(cache->mat_len, nullptr);
496 cache->tris_per_mat.reinitialize(cache->mat_len);
497
498 cache->is_dirty = false;
499 cache->batch_ready = (DRWBatchFlag)0;
500 cache->batch_requested = (DRWBatchFlag)0;
501
503}
504
506{
507 if (!mesh_batch_cache_valid(mesh)) {
508 if (mesh.runtime->batch_cache) {
509 mesh_batch_cache_clear(*static_cast<MeshBatchCache *>(mesh.runtime->batch_cache));
510 }
512 }
513}
514
516{
517 return static_cast<MeshBatchCache *>(mesh.runtime->batch_cache);
518}
519
521 const DRW_MeshWeightState *wstate)
522{
523 if (!drw_mesh_weight_state_compare(&cache.weight_state, wstate)) {
524 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
525 mbc->buff.vbos.remove(VBOType::VertexGroupWeight);
526 }
528
530
532 }
533}
534
536{
539
540 /* If there are only a few materials at most, just request batches for everything. However, if
541 * the maximum material index is large, detect the actually used material indices first and only
542 * request those. This reduces the overhead of dealing with all these batches down the line. */
543 if (cache.mat_len < 16) {
544 for (int i = 0; i < cache.mat_len; i++) {
546 }
547 }
548 else {
549 const VectorSet<int> &used_material_indices = mesh.material_indices_used();
550 for (const int i : used_material_indices) {
552 }
553 }
554}
555
560
562{
564 cache,
572
573 cache.tot_area = 0.0f;
574 cache.tot_uv_area = 0.0f;
575
576 /* We discarded the vbo.uv so we need to reset the cd_used flag. */
577 cache.cd_used.uv = 0;
578 cache.cd_used.edit_uv = 0;
579}
580
588
590{
591 if (!mesh->runtime->batch_cache) {
592 return;
593 }
594 MeshBatchCache &cache = *static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
595 switch (mode) {
598
599 /* Because visible UVs depends on edit mode selection, discard topology. */
601 break;
603 /* Paint mode selection flag is packed inside the nor attribute.
604 * Note that it can be slow if auto smooth is enabled. (see #63946) */
606 break;
608 cache.is_dirty = true;
609 break;
613 break;
616 break;
619 break;
620 default:
621 BLI_assert(0);
622 }
623}
624
626{
627 mbc->buff.ibos.clear();
628 mbc->buff.vbos.clear();
629
630 mbc->loose_geom = {};
631 mbc->face_sorted = {};
632}
633
635{
636 if (cache.subdiv_cache) {
638 MEM_delete(cache.subdiv_cache);
639 cache.subdiv_cache = nullptr;
640 }
641}
642
644{
645 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
647 }
648
649 cache.tris_per_mat = {};
650
651 for (int i = 0; i < sizeof(cache.batch) / sizeof(void *); i++) {
652 gpu::Batch **batch = (gpu::Batch **)&cache.batch;
654 }
655 for (const int i : cache.surface_per_mat.index_range()) {
657 }
658
661 cache.surface_per_mat = {};
662 cache.mat_len = 0;
663
664 cache.batch_ready = (DRWBatchFlag)0;
666
668}
669
670void DRW_mesh_batch_cache_free(void *batch_cache)
671{
672 MeshBatchCache *cache = static_cast<MeshBatchCache *>(batch_cache);
674 MEM_delete(cache);
675}
676
678
679/* ---------------------------------------------------------------------- */
682
683static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
684{
685 DRW_MeshCDMask cd_needed;
686 mesh_cd_layers_type_clear(&cd_needed);
687 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
688
689 BLI_assert(cd_needed.uv != 0 &&
690 "No uv layer available in texpaint, but batches requested anyway!");
691
692 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
693 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
694}
695
697 const Mesh &mesh,
698 VectorSet<std::string> &attributes)
699{
700 const Mesh &me_final = editmesh_final_or_this(object, mesh);
701 const CustomData &cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
702 const CustomData &cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
703
704 auto request_color_attribute = [&](const StringRef name) {
705 if (!name.is_empty()) {
706 int layer_index;
707 eCustomDataType type;
708 if (drw_custom_data_match_attribute(cd_vdata, name, &layer_index, &type)) {
709 drw_attributes_add_request(&attributes, name);
710 }
711 else if (drw_custom_data_match_attribute(cd_ldata, name, &layer_index, &type)) {
712 drw_attributes_add_request(&attributes, name);
713 }
714 }
715 };
716
717 request_color_attribute(me_final.active_color_attribute);
718 request_color_attribute(me_final.default_color_attribute);
719}
720
727
734
736{
737 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
739
740 return cache.batch.surface;
741}
742
744{
745 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
746 if (cache.no_loose_wire) {
747 return nullptr;
748 }
750 return DRW_batch_request(&cache.batch.loose_edges);
751}
752
759
760gpu::Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
761{
762 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
764 /* Even if is_manifold is not correct (not updated),
765 * the default (not manifold) is just the worst case. */
766 if (r_is_manifold) {
767 *r_is_manifold = cache.is_manifold;
768 }
770}
771
778
785
787 const Mesh &mesh,
788 const Span<const GPUMaterial *> materials,
789 VectorSet<std::string> *r_attrs,
790 DRW_MeshCDMask *r_cd_needed)
791{
792 VectorSet<std::string> attrs_needed;
793 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
794
795 if (r_attrs) {
796 *r_attrs = attrs_needed;
797 }
798
799 if (r_cd_needed) {
800 *r_cd_needed = cd_needed;
801 }
802}
803
805 Object &object, Mesh &mesh, const Span<const GPUMaterial *> materials)
806{
807 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
808 VectorSet<std::string> attrs_needed;
809 DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(object, mesh, materials, &attrs_needed);
810
811 BLI_assert(materials.size() == cache.mat_len);
812
813 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
814 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
816 return cache.surface_per_mat;
817}
818
826
828{
829 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
830 texpaint_request_active_uv(cache, object, mesh);
832 return cache.batch.surface;
833}
834
836{
837 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
838
839 VectorSet<std::string> attrs_needed{};
840 request_active_and_default_color_attributes(object, mesh, attrs_needed);
841
842 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
843
845 return cache.batch.surface;
846}
847
849{
850 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
851
852 VectorSet<std::string> attrs_needed{};
853 request_active_and_default_color_attributes(object, mesh, attrs_needed);
854
855 drw_attributes_merge(&cache.attr_needed, &attrs_needed, mesh.runtime->render_mutex);
856
858 return cache.batch.surface;
859}
860
871
881
883
884/* ---------------------------------------------------------------------- */
887
894
901
908
915
922
929
936
938
939/* ---------------------------------------------------------------------- */
942
949
956
963
970
972
973/* ---------------------------------------------------------------------- */
976
977static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
978{
979 DRW_MeshCDMask cd_needed;
980 mesh_cd_layers_type_clear(&cd_needed);
981 mesh_cd_calc_active_uv_layer(object, mesh, cd_needed);
982 mesh_cd_calc_edit_uv_layer(mesh, &cd_needed);
983
984 BLI_assert(cd_needed.edit_uv != 0 &&
985 "No uv layer available in edituv, but batches requested anyway!");
986
987 mesh_cd_calc_active_mask_uv_layer(object, mesh, cd_needed);
988 mesh_cd_layers_type_merge(&cache.cd_needed, cd_needed);
989}
990
992 Mesh &mesh,
993 float **tot_area,
994 float **tot_uv_area)
995{
996 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
997 edituv_request_active_uv(cache, object, mesh);
999
1000 if (tot_area != nullptr) {
1001 *tot_area = &cache.tot_area;
1002 }
1003 if (tot_uv_area != nullptr) {
1004 *tot_uv_area = &cache.tot_uv_area;
1005 }
1007}
1008
1016
1018{
1019 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1020 edituv_request_active_uv(cache, object, mesh);
1022 return DRW_batch_request(&cache.batch.edituv_faces);
1023}
1024
1026{
1027 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1028 edituv_request_active_uv(cache, object, mesh);
1030 return DRW_batch_request(&cache.batch.edituv_edges);
1031}
1032
1034{
1035 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1036 edituv_request_active_uv(cache, object, mesh);
1038 return DRW_batch_request(&cache.batch.edituv_verts);
1039}
1040
1042{
1043 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1044 edituv_request_active_uv(cache, object, mesh);
1046 return DRW_batch_request(&cache.batch.edituv_fdots);
1047}
1048
1050{
1051 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1052 edituv_request_active_uv(cache, object, mesh);
1054 return DRW_batch_request(&cache.batch.uv_faces);
1055}
1056
1058{
1059 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1060 edituv_request_active_uv(cache, object, mesh);
1062 return DRW_batch_request(&cache.batch.wire_loops_uvs);
1063}
1064
1072
1079
1081
1082/* ---------------------------------------------------------------------- */
1085
1087{
1088 MeshBatchCache *cache = static_cast<MeshBatchCache *>(mesh->runtime->batch_cache);
1089
1090 if (cache == nullptr) {
1091 return;
1092 }
1093
1095 cache->lastmatch = ctime;
1096 }
1097
1099 cache->lastmatch = ctime;
1100 }
1101
1102 if (ctime - cache->lastmatch > U.vbotimeout) {
1104 }
1105
1107 cache->attr_used_over_time.clear();
1108}
1109
1110static void init_empty_dummy_batch(gpu::Batch &batch)
1111{
1112 /* The dummy batch is only used in cases with invalid edit mode mapping, so the overhead of
1113 * creating a vertex buffer shouldn't matter. */
1117 GPU_vertbuf_data_alloc(*vbo, 1);
1118 /* Avoid the batch being rendered at all. */
1119 GPU_vertbuf_data_len_set(*vbo, 0);
1120
1121 GPU_batch_vertbuf_add(&batch, vbo, true);
1122}
1123
1125 Object &ob,
1126 Mesh &mesh,
1127 const Scene &scene,
1128 const bool is_paint_mode,
1129 const bool use_hide)
1130{
1131 const ToolSettings *ts = scene.toolsettings;
1132
1133 MeshBatchCache &cache = *mesh_batch_cache_get(mesh);
1134 bool cd_uv_update = false;
1135
1136 /* Early out */
1137 if (cache.batch_requested == 0) {
1138 return;
1139 }
1140
1141 /* Sanity check. */
1142 if ((mesh.runtime->edit_mesh != nullptr) && (ob.mode & OB_MODE_EDIT)) {
1144 }
1145
1146 const bool is_editmode = ob.mode == OB_MODE_EDIT;
1147
1148 DRWBatchFlag batch_requested = cache.batch_requested;
1149 cache.batch_requested = (DRWBatchFlag)0;
1150
1151 if (batch_requested & MBC_SURFACE_WEIGHTS) {
1152 /* Check vertex weights. */
1153 if ((cache.batch.surface_weights != nullptr) && (ts != nullptr)) {
1154 DRW_MeshWeightState wstate;
1155 BLI_assert(ob.type == OB_MESH);
1156 drw_mesh_weight_state_extract(ob, mesh, *ts, is_paint_mode, &wstate);
1160 }
1161 }
1162
1163 if (batch_requested &
1167 {
1168 /* Modifiers will only generate an orco layer if the mesh is deformed. */
1169 if (cache.cd_needed.orco != 0) {
1170 /* Orco is always extracted from final mesh. */
1171 const Mesh *me_final = (mesh.runtime->edit_mesh) ? BKE_object_get_editmesh_eval_final(&ob) :
1172 &mesh;
1173 if (CustomData_get_layer(&me_final->vert_data, CD_ORCO) == nullptr) {
1174 /* Skip orco calculation */
1175 cache.cd_needed.orco = 0;
1176 }
1177 }
1178
1179 /* Verify that all surface batches have needed attribute layers.
1180 */
1181 /* TODO(fclem): We could be a bit smarter here and only do it per
1182 * material. */
1183 bool cd_overlap = mesh_cd_layers_type_overlap(cache.cd_used, cache.cd_needed);
1184 bool attr_overlap = drw_attributes_overlap(&cache.attr_used, &cache.attr_needed);
1185 if (cd_overlap == false || attr_overlap == false) {
1186 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1187 if ((cache.cd_used.uv & cache.cd_needed.uv) != cache.cd_needed.uv) {
1188 mbc->buff.vbos.remove(VBOType::UVs);
1189 cd_uv_update = true;
1190 }
1191 if ((cache.cd_used.tan & cache.cd_needed.tan) != cache.cd_needed.tan ||
1192 cache.cd_used.tan_orco != cache.cd_needed.tan_orco)
1193 {
1194 mbc->buff.vbos.remove(VBOType::Tangents);
1195 }
1196 if (cache.cd_used.orco != cache.cd_needed.orco) {
1197 mbc->buff.vbos.remove(VBOType::Orco);
1198 }
1199 if (cache.cd_used.sculpt_overlays != cache.cd_needed.sculpt_overlays) {
1200 mbc->buff.vbos.remove(VBOType::SculptData);
1201 }
1202 if (!drw_attributes_overlap(&cache.attr_used, &cache.attr_needed)) {
1203 for (int i = 0; i < GPU_MAX_ATTR; i++) {
1204 mbc->buff.vbos.remove(VBOType(int8_t(VBOType::Attr0) + i));
1205 }
1206 }
1207 }
1208 /* We can't discard batches at this point as they have been
1209 * referenced for drawing. Just clear them in place. */
1210 for (int i = 0; i < cache.mat_len; i++) {
1212 }
1216
1218 drw_attributes_merge(&cache.attr_used, &cache.attr_needed, mesh.runtime->render_mutex);
1219 }
1222
1224 &cache.attr_used_over_time, &cache.attr_needed, mesh.runtime->render_mutex);
1225 cache.attr_needed.clear();
1226 }
1227
1228 if ((batch_requested & MBC_EDITUV) || cd_uv_update) {
1229 /* Discard UV batches if sync_selection changes */
1230 const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1231 if (cd_uv_update || (cache.is_uvsyncsel != is_uvsyncsel)) {
1232 cache.is_uvsyncsel = is_uvsyncsel;
1233 FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
1234 mbc->buff.vbos.remove(VBOType::EditUVData);
1235 mbc->buff.vbos.remove(VBOType::FaceDotUV);
1236 mbc->buff.vbos.remove(VBOType::FaceDotEditUVData);
1237 mbc->buff.ibos.remove(IBOType::EditUVTris);
1238 mbc->buff.ibos.remove(IBOType::EditUVLines);
1239 mbc->buff.ibos.remove(IBOType::EditUVPoints);
1240 mbc->buff.ibos.remove(IBOType::EditUVFaceDots);
1241 }
1242 /* We only clear the batches as they may already have been
1243 * referenced. */
1253 cache.batch_ready &= ~MBC_EDITUV;
1254 }
1255 }
1256
1257 /* Second chance to early out */
1258 if ((batch_requested & ~cache.batch_ready) == 0) {
1259 return;
1260 }
1261
1262 /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-pbvh::Tree).
1263 * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1264 * Normal updates should be part of the brush loop and only run during the stroke when the
1265 * brush needs to sample the surface. The drawing code should only update the normals
1266 * per redraw when smooth shading is enabled. */
1269 }
1270
1271 /* This is the mesh before modifier evaluation, used to test how the mesh changed during
1272 * evaluation to decide which data is valid to extract. */
1273 const Mesh *orig_edit_mesh = is_editmode ? BKE_object_get_pre_modified_mesh(&ob) : nullptr;
1274
1275 bool do_cage = false;
1276 const Mesh *edit_data_mesh = nullptr;
1277 if (is_editmode) {
1278 const Mesh *eval_cage = DRW_object_get_editmesh_cage_for_drawing(ob);
1279 if (eval_cage && eval_cage != &mesh) {
1280 /* Extract "cage" data separately when it exists and it's not just the same mesh as the
1281 * regular evaluated mesh. Otherwise edit data will be extracted from the final evaluated
1282 * mesh. */
1283 do_cage = true;
1284 edit_data_mesh = eval_cage;
1285 }
1286 else {
1287 edit_data_mesh = &mesh;
1288 }
1289 }
1290
1291 bool do_uvcage = false;
1292 if (is_editmode) {
1293 /* Currently we don't extract UV data from the evaluated mesh unless it's the same mesh as the
1294 * original edit mesh. */
1295 do_uvcage = !(mesh.runtime->is_original_bmesh &&
1296 mesh.runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
1297 }
1298
1299 const DRWBatchFlag batches_to_create = batch_requested & ~cache.batch_ready;
1300
1301 const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(&mesh);
1302
1303 enum class BufferList { Final, Cage, UVCage };
1304
1305 struct BatchCreateData {
1306 gpu::Batch &batch;
1307 GPUPrimType prim_type;
1308 BufferList list;
1309 std::optional<IBOType> ibo;
1310 Vector<VBOType> vbos;
1311 };
1312 Vector<BatchCreateData> batch_info;
1313
1314 {
1315 const BufferList list = BufferList::Final;
1316 if (batches_to_create & MBC_SURFACE) {
1317 BatchCreateData batch{*cache.batch.surface,
1319 list,
1322 if (cache.cd_used.uv != 0) {
1323 batch.vbos.append(VBOType::UVs);
1324 }
1325 for (const int i : cache.attr_used.index_range()) {
1326 batch.vbos.append(VBOType(int8_t(VBOType::Attr0) + i));
1327 }
1328 batch_info.append(std::move(batch));
1329 }
1330 if (batches_to_create & MBC_VIEWER_ATTRIBUTE_OVERLAY) {
1331 batch_info.append({*cache.batch.surface_viewer_attribute,
1333 list,
1336 }
1337 if (batches_to_create & MBC_ALL_VERTS) {
1338 batch_info.append({*cache.batch.all_verts,
1340 list,
1341 std::nullopt,
1343 }
1344 if (batches_to_create & MBC_SCULPT_OVERLAYS) {
1345 batch_info.append({*cache.batch.sculpt_overlays,
1347 list,
1350 }
1351 if (batches_to_create & MBC_ALL_EDGES) {
1352 batch_info.append(
1354 }
1355 if (batches_to_create & MBC_LOOSE_EDGES) {
1356 batch_info.append({*cache.batch.loose_edges,
1358 list,
1361 }
1362 if (batches_to_create & MBC_EDGE_DETECTION) {
1363 batch_info.append({*cache.batch.edge_detection,
1365 list,
1368 }
1369 if (batches_to_create & MBC_SURFACE_WEIGHTS) {
1370 batch_info.append({*cache.batch.surface_weights,
1372 list,
1375 }
1376 if (batches_to_create & MBC_WIRE_LOOPS) {
1377 batch_info.append({*cache.batch.wire_loops,
1379 list,
1382 }
1383 if (batches_to_create & MBC_WIRE_EDGES) {
1384 batch_info.append({*cache.batch.wire_edges,
1386 list,
1389 }
1390 if (batches_to_create & MBC_WIRE_LOOPS_UVS) {
1391 BatchCreateData batch{
1393 if (cache.cd_used.uv != 0) {
1394 batch.vbos.append(VBOType::UVs);
1395 }
1396 batch_info.append(std::move(batch));
1397 }
1398 if (batches_to_create & MBC_WIRE_LOOPS_EDITUVS) {
1399 BatchCreateData batch{
1401 if (cache.cd_used.uv != 0) {
1402 batch.vbos.append(VBOType::UVs);
1403 }
1404 batch_info.append(std::move(batch));
1405 }
1406 if (batches_to_create & MBC_UV_FACES) {
1407 BatchCreateData batch{*cache.batch.uv_faces, GPU_PRIM_TRIS, list, IBOType::Tris, {}};
1408 if (cache.cd_used.uv != 0) {
1409 batch.vbos.append(VBOType::UVs);
1410 }
1411 batch_info.append(std::move(batch));
1412 }
1413 if (batches_to_create & MBC_EDIT_MESH_ANALYSIS) {
1414 batch_info.append({*cache.batch.edit_mesh_analysis,
1416 list,
1419 }
1420 }
1421
1422 /* When the mesh doesn't correspond to the object's original mesh (i.e. the mesh was replaced by
1423 * another with the object info node during evaluation), don't extract edit mode data for it.
1424 * That data can be invalid because any original indices (#CD_ORIGINDEX) on the evaluated mesh
1425 * won't correspond to the correct mesh. */
1426 const bool edit_mapping_valid = is_editmode && BKE_editmesh_eval_orig_map_available(
1427 *edit_data_mesh, orig_edit_mesh);
1428
1429 {
1430 const BufferList list = do_cage ? BufferList::Cage : BufferList::Final;
1431 if (batches_to_create & MBC_EDIT_TRIANGLES) {
1432 if (edit_mapping_valid) {
1433 batch_info.append({*cache.batch.edit_triangles,
1435 list,
1438 }
1439 else {
1441 }
1442 }
1443 if (batches_to_create & MBC_EDIT_VERTICES) {
1444 if (edit_mapping_valid) {
1445 BatchCreateData batch{*cache.batch.edit_vertices,
1447 list,
1450 if (!do_subdivision || do_cage) {
1451 batch.vbos.append(VBOType::CornerNormal);
1452 }
1453 batch_info.append(std::move(batch));
1454 }
1455 else {
1457 }
1458 }
1459 if (batches_to_create & MBC_EDIT_EDGES) {
1460 if (edit_mapping_valid) {
1461 BatchCreateData batch{*cache.batch.edit_edges,
1463 list,
1466 if (!do_subdivision || do_cage) {
1467 batch.vbos.append(VBOType::VertexNormal);
1468 }
1469 batch_info.append(std::move(batch));
1470 }
1471 else {
1473 }
1474 }
1475 if (batches_to_create & MBC_EDIT_VNOR) {
1476 if (edit_mapping_valid) {
1477 batch_info.append({*cache.batch.edit_vnor,
1479 list,
1482 }
1483 else {
1485 }
1486 }
1487 if (batches_to_create & MBC_EDIT_LNOR) {
1488 if (edit_mapping_valid) {
1489 batch_info.append({*cache.batch.edit_lnor,
1491 list,
1494 }
1495 else {
1497 }
1498 }
1499 if (batches_to_create & MBC_EDIT_FACEDOTS) {
1500 if (edit_mapping_valid) {
1501 batch_info.append({*cache.batch.edit_fdots,
1503 list,
1506 }
1507 else {
1509 }
1510 }
1511 if (batches_to_create & MBC_SKIN_ROOTS) {
1512 if (edit_mapping_valid) {
1513 batch_info.append({*cache.batch.edit_skin_roots,
1515 list,
1516 std::nullopt,
1518 }
1519 else {
1521 }
1522 }
1523 if (batches_to_create & MBC_EDIT_SELECTION_VERTS) {
1524 if (is_editmode && !edit_mapping_valid) {
1526 }
1527 else {
1528 batch_info.append({*cache.batch.edit_selection_verts,
1530 list,
1533 }
1534 }
1535 if (batches_to_create & MBC_EDIT_SELECTION_EDGES) {
1536 if (is_editmode && !edit_mapping_valid) {
1538 }
1539 else {
1540 batch_info.append({*cache.batch.edit_selection_edges,
1542 list,
1545 }
1546 }
1547 if (batches_to_create & MBC_EDIT_SELECTION_FACES) {
1548 if (is_editmode && !edit_mapping_valid) {
1550 }
1551 else {
1552 batch_info.append({*cache.batch.edit_selection_faces,
1554 list,
1557 }
1558 }
1559 if (batches_to_create & MBC_EDIT_SELECTION_FACEDOTS) {
1560 if (is_editmode && !edit_mapping_valid) {
1562 }
1563 else {
1564 batch_info.append({*cache.batch.edit_selection_fdots,
1566 list,
1569 }
1570 }
1571 }
1572
1573 {
1579 const BufferList list = do_uvcage ? BufferList::UVCage : BufferList::Final;
1580
1581 if (batches_to_create & MBC_EDITUV_FACES) {
1582 if (edit_mapping_valid) {
1583 batch_info.append({*cache.batch.edituv_faces,
1585 list,
1588 }
1589 else {
1591 }
1592 }
1593 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_AREA) {
1594 if (edit_mapping_valid) {
1595 batch_info.append({*cache.batch.edituv_faces_stretch_area,
1597 list,
1600 }
1601 else {
1603 }
1604 }
1605 if (batches_to_create & MBC_EDITUV_FACES_STRETCH_ANGLE) {
1606 if (edit_mapping_valid) {
1607 batch_info.append({*cache.batch.edituv_faces_stretch_angle,
1609 list,
1612 }
1613 else {
1615 }
1616 }
1617 if (batches_to_create & MBC_EDITUV_EDGES) {
1618 if (edit_mapping_valid) {
1619 batch_info.append({*cache.batch.edituv_edges,
1621 list,
1624 }
1625 else {
1627 }
1628 }
1629 if (batches_to_create & MBC_EDITUV_VERTS) {
1630 if (edit_mapping_valid) {
1631 batch_info.append({*cache.batch.edituv_verts,
1633 list,
1636 }
1637 else {
1639 }
1640 }
1641 if (batches_to_create & MBC_EDITUV_FACEDOTS) {
1642 if (edit_mapping_valid) {
1643 batch_info.append({*cache.batch.edituv_fdots,
1645 list,
1648 }
1649 else {
1651 }
1652 }
1653 }
1654
1655 std::array<VectorSet<IBOType>, 3> ibo_requests;
1656 std::array<VectorSet<VBOType>, 3> vbo_requests;
1657 for (const BatchCreateData &batch : batch_info) {
1658 if (batch.ibo) {
1659 ibo_requests[int(batch.list)].add(*batch.ibo);
1660 }
1661 vbo_requests[int(batch.list)].add_multiple(batch.vbos);
1662 }
1663
1664 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1665 ibo_requests[int(BufferList::Final)].add(IBOType::Tris);
1666 vbo_requests[int(BufferList::Final)].add(VBOType::CornerNormal);
1667 vbo_requests[int(BufferList::Final)].add(VBOType::Position);
1668 for (const int i : cache.attr_used.index_range()) {
1669 vbo_requests[int(BufferList::Final)].add(VBOType(int8_t(VBOType::Attr0) + i));
1670 }
1671 if (cache.cd_used.uv != 0) {
1672 vbo_requests[int(BufferList::Final)].add(VBOType::UVs);
1673 }
1674 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1675 vbo_requests[int(BufferList::Final)].add(VBOType::Tangents);
1676 }
1677 if (cache.cd_used.orco != 0) {
1678 vbo_requests[int(BufferList::Final)].add(VBOType::Orco);
1679 }
1680 }
1681
1682 if (do_uvcage) {
1684 scene,
1685 cache,
1686 cache.uv_cage,
1687 ibo_requests[int(BufferList::UVCage)],
1688 vbo_requests[int(BufferList::UVCage)],
1689 ob,
1690 mesh,
1691 is_editmode,
1692 is_paint_mode,
1693 false,
1694 true,
1695 true);
1696 }
1697
1698 if (do_cage) {
1700 scene,
1701 cache,
1702 cache.cage,
1703 ibo_requests[int(BufferList::Cage)],
1704 vbo_requests[int(BufferList::Cage)],
1705 ob,
1706 mesh,
1707 is_editmode,
1708 is_paint_mode,
1709 false,
1710 false,
1711 true);
1712 }
1713
1714 if (do_subdivision) {
1716 mesh,
1717 cache,
1718 cache.final,
1719 ibo_requests[int(BufferList::Final)],
1720 vbo_requests[int(BufferList::Final)],
1721 is_editmode,
1722 is_paint_mode,
1723 true,
1724 false,
1725 do_cage,
1726 ts,
1727 use_hide);
1728 }
1729 else {
1730 /* The subsurf modifier may have been recently removed, or another modifier was added after it,
1731 * so free any potential subdivision cache as it is not needed anymore. */
1733 }
1734
1736 scene,
1737 cache,
1738 cache.final,
1739 ibo_requests[int(BufferList::Final)],
1740 vbo_requests[int(BufferList::Final)],
1741 ob,
1742 mesh,
1743 is_editmode,
1744 is_paint_mode,
1745 true,
1746 false,
1747 use_hide);
1748
1749 std::array<MeshBufferCache *, 3> caches{&cache.final, &cache.cage, &cache.uv_cage};
1750 for (const BatchCreateData &batch : batch_info) {
1751 MeshBufferCache &cache_for_batch = *caches[int(batch.list)];
1752 gpu::IndexBuf *ibo = batch.ibo ? caches[int(batch.list)]->buff.ibos.lookup(*batch.ibo).get() :
1753 nullptr;
1754 GPU_batch_init(&batch.batch, batch.prim_type, nullptr, ibo);
1755 for (const VBOType vbo_request : batch.vbos) {
1757 &batch.batch, cache_for_batch.buff.vbos.lookup(vbo_request).get(), false);
1758 }
1759 }
1760
1761 if (batches_to_create & MBC_SURFACE_PER_MAT) {
1763 gpu::IndexBuf &tris_ibo = *buffers.ibos.lookup(IBOType::Tris);
1765 for (const int material : IndexRange(cache.mat_len)) {
1766 gpu::Batch *batch = cache.surface_per_mat[material];
1767 if (!batch) {
1768 continue;
1769 }
1770 GPU_batch_init(batch, GPU_PRIM_TRIS, nullptr, cache.tris_per_mat[material].get());
1771 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::CornerNormal).get(), false);
1772 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Position).get(), false);
1773 if (cache.cd_used.uv != 0) {
1774 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::UVs).get(), false);
1775 }
1776 if ((cache.cd_used.tan != 0) || (cache.cd_used.tan_orco != 0)) {
1777 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Tangents).get(), false);
1778 }
1779 if (cache.cd_used.orco != 0) {
1780 GPU_batch_vertbuf_add(batch, buffers.vbos.lookup(VBOType::Orco).get(), false);
1781 }
1782 for (const int i : cache.attr_used.index_range()) {
1784 batch, buffers.vbos.lookup(VBOType(int8_t(VBOType::Attr0) + i)).get(), false);
1785 }
1786 }
1787 }
1788
1789 cache.batch_ready |= batch_requested;
1790}
1791
1793
1794} // namespace blender::draw
CustomData interface, see also DNA_customdata_types.h.
int CustomData_get_named_layer(const CustomData *data, eCustomDataType type, blender::StringRef name)
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
bool CustomData_layer_is_anonymous(const CustomData *data, eCustomDataType type, int n)
int CustomData_get_stencil_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_active_layer(const CustomData *data, eCustomDataType type)
int CustomData_get_render_layer(const CustomData *data, eCustomDataType type)
bool BKE_editmesh_eval_orig_map_available(const Mesh &mesh_eval, const Mesh *mesh_orig)
Definition editmesh.cc:67
General operations, lookup, etc. for materials.
int BKE_id_material_used_with_fallback_eval(const ID &id)
eMeshBatchDirtyMode
Definition BKE_mesh.h:37
@ BKE_MESH_BATCH_DIRTY_UVEDIT_ALL
Definition BKE_mesh.h:42
@ BKE_MESH_BATCH_DIRTY_SELECT_PAINT
Definition BKE_mesh.h:40
@ BKE_MESH_BATCH_DIRTY_SHADING
Definition BKE_mesh.h:41
@ BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT
Definition BKE_mesh.h:43
@ BKE_MESH_BATCH_DIRTY_ALL
Definition BKE_mesh.h:38
@ BKE_MESH_BATCH_DIRTY_SELECT
Definition BKE_mesh.h:39
@ ME_WRAPPER_TYPE_BMESH
General operations, lookup, etc. for blender objects.
const Mesh * BKE_object_get_pre_modified_mesh(const Object *object)
const Mesh * BKE_object_get_editmesh_eval_final(const Object *object)
Functions for dealing with objects and deform verts, used by painting and tools.
bool BKE_object_defgroup_check_lock_relative(const bool *lock_flags, const bool *validmap, int index)
void BKE_object_defgroup_split_locked_validmap(int defbase_tot, const bool *locked, const bool *deform, bool *r_locked, bool *r_unlocked)
bool * BKE_object_defgroup_validmap_get(struct Object *ob, int defbase_tot)
bool * BKE_object_defgroup_lock_flags_get(struct Object *ob, int defbase_tot)
void BKE_object_defgroup_mirror_selection(struct Object *ob, int defbase_tot, const bool *selection, bool *dg_flags_sel, int *r_dg_flags_sel_tot)
bool BKE_object_defgroup_check_lock_relative_multi(int defbase_tot, const bool *lock_flags, const bool *selected, int sel_tot)
bool * BKE_object_defgroup_selected_get(struct Object *ob, int defbase_tot, int *r_dg_flags_sel_tot)
A BVH for high poly meshes.
bool BKE_subsurf_modifier_has_gpu_subdiv(const Mesh *mesh)
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_INLINE
#define LISTBASE_FOREACH(type, var, list)
int BLI_listbase_count(const ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:524
#define ARRAY_SIZE(arr)
@ CD_PROP_BYTE_COLOR
@ CD_PROP_FLOAT
@ CD_PROP_FLOAT3
@ CD_PROP_INT32_2D
@ CD_PROP_COLOR
@ CD_PROP_QUATERNION
@ CD_PROP_INT32
@ CD_PROP_FLOAT2
@ CD_PROP_INT16_2D
@ CD_AUTO_FROM_NAME
#define ME_USING_MIRROR_X_VERTEX_GROUPS(_me)
@ OB_MODE_EDIT
Object is a sort of wrapper for general info.
@ OB_MESH
@ UV_SYNC_SELECTION
#define GPU_BATCH_CLEAR_SAFE(batch)
Definition GPU_batch.hh:190
int GPU_batch_vertbuf_add(blender::gpu::Batch *batch, blender::gpu::VertBuf *vertex_buf, bool own_vbo)
#define GPU_batch_init(batch, primitive_type, vertex_buf, index_buf)
Definition GPU_batch.hh:166
#define GPU_BATCH_DISCARD_SAFE(batch)
Definition GPU_batch.hh:204
ListBase GPU_material_attributes(const GPUMaterial *material)
GPUPrimType
@ GPU_PRIM_LINES
@ GPU_PRIM_POINTS
@ GPU_PRIM_LINES_ADJ
@ GPU_PRIM_TRIS
static constexpr int GPU_MAX_ATTR
Definition GPU_shader.hh:34
#define GPU_vertbuf_create_with_format(format)
void GPU_vertbuf_data_len_set(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
@ GPU_FETCH_FLOAT
uint GPU_vertformat_attr_add(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_F32
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
#define U
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
IndexRange index_range() const
Definition BLI_array.hh:349
const T & first() const
Definition BLI_array.hh:270
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
bool is_empty() const
Definition BLI_array.hh:253
IndexRange index_range() const
void reserve(const int64_t n)
Definition BLI_set.hh:637
bool contains(const Key &key) const
Definition BLI_set.hh:310
bool add(const Key &key)
Definition BLI_set.hh:248
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr bool is_empty() const
constexpr const char * c_str() const
void append(const T &value)
#define FOREACH_MESH_BUFFER_CACHE(batch_cache, mbc)
#define MBC_BATCH_LEN
#define MBC_EDITUV
blender::gpu::Batch * DRW_batch_request(blender::gpu::Batch **batch)
const Mesh * DRW_object_get_editmesh_cage_for_drawing(const Object &object)
Extraction of Mesh data into VBO to feed to GPU.
struct @064345207361167251075330302113175271221317160336::@113254110077376341056327177062323111323010325277 batch
#define MEM_SAFE_FREE(v)
format
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
pbvh::Tree * pbvh_get(Object &object)
Definition paint.cc:2912
void update_normals_from_eval(Object &object_eval, Tree &pbvh)
Definition pbvh.cc:1080
void DRW_mesh_batch_cache_validate(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Object &object, Mesh &mesh)
void drw_attributes_add_request(VectorSet< std::string > *attrs, const StringRef name)
const CustomData & mesh_cd_ldata_get_from_mesh(const Mesh &mesh)
static void drw_mesh_weight_state_extract(Object &ob, Mesh &mesh, const ToolSettings &ts, bool paint_mode, DRW_MeshWeightState *wstate)
static void init_empty_dummy_batch(gpu::Batch &batch)
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_loose_edges(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_edges(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_texpaint_single(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_edges(Mesh &mesh)
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache &cache)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_facedots(Object &object, Mesh &mesh)
static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
blender::gpu::Batch * DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh &mesh)
static void mesh_cd_calc_active_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_edges(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_wireframe(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_facedots(Mesh &mesh)
void draw_subdiv_cache_free(DRWSubdivCache &cache)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_verts(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface(Mesh &mesh)
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
static void texpaint_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static void mesh_cd_calc_edit_uv_layer(const Mesh &, DRW_MeshCDMask *cd_used)
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_weights(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_shaded(Object &object, Mesh &mesh, Span< const GPUMaterial * > materials)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces(Object &object, Mesh &mesh)
const CustomData & mesh_cd_edata_get_from_mesh(const Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_vertpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_wireframes_face(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_sculpt(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_edges(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_loop_normals(Mesh &mesh)
static bool drw_mesh_weight_state_compare(const DRW_MeshWeightState *a, const DRW_MeshWeightState *b)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edge_detection(Mesh &mesh, bool *r_is_manifold)
void drw_attributes_merge(VectorSet< std::string > *dst, const VectorSet< std::string > *src, Mutex &render_mutex)
void DRW_mesh_batch_cache_free_old(Mesh *mesh, int ctime)
void DRW_mesh_batch_cache_free(void *batch_cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_triangles(Mesh &mesh)
Span< gpu::Batch * > DRW_mesh_batch_cache_get_surface_texpaint(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vertices(Mesh &mesh)
static void mesh_batch_cache_init(Mesh &mesh)
static void mesh_batch_cache_clear(MeshBatchCache &cache)
blender::gpu::Batch * DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh &mesh)
static void mesh_buffer_cache_clear(MeshBufferCache *mbc)
static void mesh_batch_cache_discard_uvedit(MeshBatchCache &cache)
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
static void mesh_batch_cache_check_vertex_group(MeshBatchCache &cache, const DRW_MeshWeightState *wstate)
static void drw_mesh_weight_state_copy(DRW_MeshWeightState *wstate_dst, const DRW_MeshWeightState *wstate_src)
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
const Mesh & editmesh_final_or_this(const Object &object, const Mesh &mesh)
static void mesh_cd_calc_active_mask_uv_layer(const Object &object, const Mesh &mesh, DRW_MeshCDMask &cd_used)
bool drw_attributes_overlap(const VectorSet< std::string > *a, const VectorSet< std::string > *b)
BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache &cache, DRWBatchFlag new_flag)
static void mesh_batch_cache_request_surface_batches(Mesh &mesh, MeshBatchCache &cache)
void DRW_create_subdivision(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *attributes)
static void request_active_and_default_color_attributes(const Object &object, const Mesh &mesh, VectorSet< std::string > &attributes)
blender::gpu::Batch * DRW_mesh_batch_cache_get_verts_with_select_id(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_sculpt_overlays(Mesh &mesh)
static void discard_buffers(MeshBatchCache &cache, const Span< VBOType > vbos, const Span< IBOType > ibos)
void DRW_mesh_batch_cache_dirty_tag(Mesh *mesh, eMeshBatchDirtyMode mode)
blender::gpu::Batch * DRW_mesh_batch_cache_get_surface_viewer_attribute(Mesh &mesh)
const CustomData & mesh_cd_vdata_get_from_mesh(const Mesh &mesh)
void DRW_mesh_batch_cache_create_requested(TaskGraph &task_graph, Object &ob, Mesh &mesh, const Scene &scene, bool is_paint_mode, bool use_hide)
void DRW_mesh_get_attributes(const Object &object, const Mesh &mesh, const Span< const GPUMaterial * > materials, VectorSet< std::string > *r_attrs, DRW_MeshCDMask *r_cd_needed)
static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache &cache)
void mesh_buffer_cache_create_requested(TaskGraph &task_graph, const Scene &scene, MeshBatchCache &cache, MeshBufferCache &mbc, Span< IBOType > ibo_requests, Span< VBOType > vbo_requests, Object &object, Mesh &mesh, bool is_editmode, bool is_paint_mode, bool do_final, bool do_uvedit, bool use_hide)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_vert_normals(Mesh &mesh)
static MeshBatchCache * mesh_batch_cache_get(Mesh &mesh)
static void drw_mesh_weight_state_clear(DRW_MeshWeightState *wstate)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_faces(Object &object, Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edges_with_select_id(Mesh &mesh)
gpu::Batch * DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object &object, Mesh &mesh, float **tot_area, float **tot_uv_area)
static void edituv_request_active_uv(MeshBatchCache &cache, Object &object, Mesh &mesh)
static bool mesh_batch_cache_valid(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_all_verts(Mesh &mesh)
blender::gpu::Batch * DRW_mesh_batch_cache_get_uv_wireframe(Object &object, Mesh &mesh)
bool drw_custom_data_match_attribute(const CustomData &custom_data, const StringRef name, int *r_layer_index, eCustomDataType *r_type)
blender::gpu::Batch * DRW_mesh_batch_cache_get_edit_skin_roots(Mesh &mesh)
const CustomData & mesh_cd_pdata_get_from_mesh(const Mesh &mesh)
void create_material_subranges(const SortedFaceData &face_sorted, gpu::IndexBuf &tris_ibo, MutableSpan< gpu::IndexBufPtr > ibos)
MeshRuntimeHandle * runtime
char * default_color_attribute
ListBase vertex_group_names
CustomData vert_data
int vertex_group_active_index
char * active_color_attribute
struct ToolSettings * toolsettings
VectorSet< std::string > attr_used_over_time
Array< gpu::IndexBufPtr > tris_per_mat
VectorSet< std::string > attr_used
Array< gpu::Batch * > surface_per_mat
VectorSet< std::string > attr_needed
Map< IBOType, std::unique_ptr< gpu::IndexBuf, gpu::IndexBufDeleter > > ibos
Map< VBOType, std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > > vbos
i
Definition text_draw.cc:230
char * buffers[2]