Blender V4.5
draw_cache_impl_subdivision.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
6#include "draw_subdivision.hh"
7
8#include "DNA_mesh_types.h"
9#include "DNA_object_types.h"
10#include "DNA_scene_types.h"
11
12#include "BKE_attribute.hh"
13#include "BKE_editmesh.hh"
14#include "BKE_mesh.hh"
15#include "BKE_mesh_mapping.hh"
16#include "BKE_object.hh"
17#include "BKE_subdiv.hh"
18#include "BKE_subdiv_eval.hh"
19#include "BKE_subdiv_foreach.hh"
20#include "BKE_subdiv_mesh.hh"
22
23#include "BLI_linklist.h"
24#include "BLI_mutex.hh"
25#include "BLI_virtual_array.hh"
26
27#include "DRW_engine.hh"
28#include "DRW_render.hh"
29
30#include "GPU_capabilities.hh"
31#include "GPU_compute.hh"
32#include "GPU_index_buffer.hh"
33#include "GPU_state.hh"
34#include "GPU_uniform_buffer.hh"
35#include "GPU_vertex_buffer.hh"
36
39#ifdef WITH_OPENSUBDIV
42#endif
43
44#include "draw_cache_extract.hh"
45#include "draw_cache_impl.hh"
46#include "draw_cache_inline.hh"
47#include "draw_common_c.hh"
48#include "draw_shader.hh"
51
52namespace blender::draw {
53
54/* -------------------------------------------------------------------- */
59
60#ifdef WITH_OPENSUBDIV
61/* Vertex format used for the `PatchTable::PatchHandle`. */
62static const GPUVertFormat &get_patch_handle_format()
63{
64 static const GPUVertFormat format = [&]() {
65 GPUVertFormat format{};
69 return format;
70 }();
71 return format;
72}
73
74/* Vertex format used for the quad-tree nodes of the PatchMap. */
75static const GPUVertFormat &get_quadtree_format()
76{
77 static const GPUVertFormat format = [&]() {
78 GPUVertFormat format{};
80 return format;
81 }();
82 return format;
83}
84
85struct CompressedPatchCoord {
86 int ptex_face_index;
87 /* UV coordinate encoded as u << 16 | v, where u and v are quantized on 16-bits. */
88 uint encoded_uv;
89};
90
91MINLINE CompressedPatchCoord make_patch_coord(int ptex_face_index, float u, float v)
92{
93 CompressedPatchCoord patch_coord = {
94 ptex_face_index,
95 (uint(u * 65535.0f) << 16) | uint(v * 65535.0f),
96 };
97 return patch_coord;
98}
99
100/* Vertex format used for the #CompressedPatchCoord. */
101static const GPUVertFormat &get_blender_patch_coords_format()
102{
103 static const GPUVertFormat format = [&]() {
104 GPUVertFormat format{};
105 /* WARNING! Adjust #CompressedPatchCoord accordingly. */
108 return format;
109 }();
110 return format;
111}
112
113#endif
114
116{
117 static const GPUVertFormat format = [&]() {
120 return format;
121 }();
122 return format;
123}
124
136
138
139// --------------------------------------------------------
140
141static uint tris_count_from_number_of_loops(const uint number_of_loops)
142{
143 const uint32_t number_of_quads = number_of_loops / 4;
144 return number_of_quads * 2;
145}
146
147/* -------------------------------------------------------------------- */
150
152 uint num_loops,
153 uint loose_len)
154{
157 GPU_vertbuf_data_alloc(*buffer, num_loops + loose_len);
158
159 buffer->data<int32_t>().take_front(num_loops).copy_from({vert_origindex, num_loops});
160 return buffer;
161}
162
164{
165 return draw_subdiv_init_origindex_buffer(vert_origindex, num_loops, 0).release();
166}
167
169
170/* -------------------------------------------------------------------- */
173
174#ifdef WITH_OPENSUBDIV
175
176static void draw_patch_map_build(DRWPatchMap *gpu_patch_map, bke::subdiv::Subdiv *subdiv)
177{
178 gpu::VertBuf *patch_map_handles = GPU_vertbuf_calloc();
179 GPU_vertbuf_init_with_format_ex(*patch_map_handles, get_patch_handle_format(), GPU_USAGE_STATIC);
180
181 gpu::VertBuf *patch_map_quadtree = GPU_vertbuf_calloc();
182 GPU_vertbuf_init_with_format_ex(*patch_map_quadtree, get_quadtree_format(), GPU_USAGE_STATIC);
183
184 int min_patch_face = 0;
185 int max_patch_face = 0;
186 int max_depth = 0;
187 int patches_are_triangular = 0;
188
189 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
190 evaluator->eval_output->getPatchMap(patch_map_handles,
191 patch_map_quadtree,
192 &min_patch_face,
193 &max_patch_face,
194 &max_depth,
195 &patches_are_triangular);
196
197 gpu_patch_map->patch_map_handles = patch_map_handles;
198 gpu_patch_map->patch_map_quadtree = patch_map_quadtree;
199 gpu_patch_map->min_patch_face = min_patch_face;
200 gpu_patch_map->max_patch_face = max_patch_face;
201 gpu_patch_map->max_depth = max_depth;
202 gpu_patch_map->patches_are_triangular = patches_are_triangular;
203}
204
205#endif
206
207static void draw_patch_map_free(DRWPatchMap *gpu_patch_map)
208{
211 gpu_patch_map->min_patch_face = 0;
212 gpu_patch_map->max_patch_face = 0;
213 gpu_patch_map->max_depth = 0;
214 gpu_patch_map->patches_are_triangular = 0;
215}
216
218
219/* -------------------------------------------------------------------- */
222
224{
225 return cache.subdiv && cache.subdiv->evaluator && cache.num_subdiv_loops != 0;
226}
227
234
242
273
274/* Flags used in #DRWSubdivCache.extra_coarse_face_data. The flags are packed in the upper bits of
275 * each uint (one per coarse face), #SUBDIV_COARSE_FACE_FLAG_OFFSET tells where they are in the
276 * packed bits. */
277#define SUBDIV_COARSE_FACE_FLAG_SMOOTH 1u
278#define SUBDIV_COARSE_FACE_FLAG_SELECT 2u
279#define SUBDIV_COARSE_FACE_FLAG_ACTIVE 4u
280#define SUBDIV_COARSE_FACE_FLAG_HIDDEN 8u
281
282#define SUBDIV_COARSE_FACE_FLAG_OFFSET 28u
283
284#define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK \
285 (SUBDIV_COARSE_FACE_FLAG_SMOOTH << SUBDIV_COARSE_FACE_FLAG_OFFSET)
286#define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK \
287 (SUBDIV_COARSE_FACE_FLAG_SELECT << SUBDIV_COARSE_FACE_FLAG_OFFSET)
288#define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK \
289 (SUBDIV_COARSE_FACE_FLAG_ACTIVE << SUBDIV_COARSE_FACE_FLAG_OFFSET)
290#define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK \
291 (SUBDIV_COARSE_FACE_FLAG_HIDDEN << SUBDIV_COARSE_FACE_FLAG_OFFSET)
292
293#define SUBDIV_COARSE_FACE_LOOP_START_MASK \
294 ~((SUBDIV_COARSE_FACE_FLAG_SMOOTH | SUBDIV_COARSE_FACE_FLAG_SELECT | \
295 SUBDIV_COARSE_FACE_FLAG_ACTIVE | SUBDIV_COARSE_FACE_FLAG_HIDDEN) \
296 << SUBDIV_COARSE_FACE_FLAG_OFFSET)
297
298static uint32_t compute_coarse_face_flag_bm(BMFace *f, BMFace *efa_act)
299{
300 uint32_t flag = 0;
303 }
306 }
307 if (f == efa_act) {
309 }
310 return flag;
311}
312
314 BMFace *efa_act,
315 MutableSpan<uint32_t> flags_data)
316{
317 BMFace *f;
318 BMIter iter;
319
320 BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
321 const int index = BM_elem_index_get(f);
322 uint32_t flag = compute_coarse_face_flag_bm(f, efa_act);
325 }
326 const int loopstart = BM_elem_index_get(f->l_first);
327 flags_data[index] = uint(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
328 }
329}
330
332 const Mesh *mesh,
333 MutableSpan<uint32_t> flags_data)
334{
335 const OffsetIndices faces = mesh->faces();
336 for (const int i : faces.index_range()) {
337 uint32_t flag = 0;
339 (!mr.sharp_faces.is_empty() && mr.sharp_faces[i])))
340 {
342 }
343 if (!mr.select_poly.is_empty() && mr.select_poly[i]) {
345 }
346 if (!mr.hide_poly.is_empty() && mr.hide_poly[i]) {
348 }
349 flags_data[i] = uint(faces[i].start()) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
350 }
351}
352
354 BMesh *bm,
355 MeshRenderData &mr,
356 MutableSpan<uint32_t> flags_data)
357{
358 if (bm == nullptr) {
360 return;
361 }
362
363 const OffsetIndices faces = mesh->faces();
364 for (const int i : faces.index_range()) {
365 BMFace *f = bm_original_face_get(mr, i);
366 /* Selection and hiding from bmesh. */
367 uint32_t flag = (f) ? compute_coarse_face_flag_bm(f, mr.efa_act) : 0;
368 /* Smooth from mesh. */
370 (!mr.sharp_faces.is_empty() && mr.sharp_faces[i])))
371 {
373 }
374 flags_data[i] = uint(faces[i].start()) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
375 }
376}
377
379 const Mesh *mesh,
380 MeshRenderData &mr)
381{
382 if (cache.extra_coarse_face_data == nullptr) {
384 static const GPUVertFormat format = []() {
387 return format;
388 }();
392 mesh->faces_num);
393 }
394
395 MutableSpan<uint32_t> flags_data = cache.extra_coarse_face_data->data<uint32_t>();
396
399 }
400 else if (mr.orig_index_face != nullptr) {
401 draw_subdiv_cache_extra_coarse_face_data_mapped(mesh, cache.bm, mr, flags_data);
402 }
403 else {
405 }
406
407 /* Make sure updated data is re-uploaded. */
409}
410
412{
413 DRWSubdivCache *subdiv_cache = mbc.subdiv_cache;
414 if (subdiv_cache == nullptr) {
415 subdiv_cache = MEM_new<DRWSubdivCache>(__func__);
416 }
417 mbc.subdiv_cache = subdiv_cache;
418 return *subdiv_cache;
419}
420
421#ifdef WITH_OPENSUBDIV
422
423static void draw_subdiv_invalidate_evaluator_for_orco(bke::subdiv::Subdiv *subdiv,
424 const Mesh *mesh)
425{
426 if (!(subdiv && subdiv->evaluator)) {
427 return;
428 }
429
430 const bool has_orco = CustomData_has_layer(&mesh->vert_data, CD_ORCO);
431 if (has_orco && !subdiv->evaluator->eval_output->hasVertexData()) {
432 /* If we suddenly have/need original coordinates, recreate the evaluator if the extra
433 * source was not created yet. The refiner also has to be recreated as refinement for source
434 * and vertex data is done only once. */
435 delete subdiv->evaluator;
436 subdiv->evaluator = nullptr;
437
438 delete subdiv->topology_refiner;
439 subdiv->topology_refiner = nullptr;
440 }
441}
442
444
445/* -------------------------------------------------------------------- */
455
456struct DRWCacheBuildingContext {
457 const Mesh *coarse_mesh;
458 const bke::subdiv::Subdiv *subdiv;
459 const bke::subdiv::ToMeshSettings *settings;
460
461 DRWSubdivCache *cache;
462
463 /* Pointers into #DRWSubdivCache buffers for easier access during traversal. */
464 CompressedPatchCoord *patch_coords;
465 int *subdiv_loop_vert_index;
466 int *subdiv_loop_subdiv_vert_index;
467 int *subdiv_loop_edge_index;
468 int *subdiv_loop_edge_draw_flag;
469 int *subdiv_loop_subdiv_edge_index;
470 int *subdiv_loop_face_index;
471
472 /* Temporary buffers used during traversal. */
473 int *vert_origindex_map;
474 int *edge_draw_flag_map;
475 int *edge_origindex_map;
476
477 /* #CD_ORIGINDEX layers from the mesh to directly look up during traversal the original-index
478 * from the base mesh for edit data so that we do not have to handle yet another GPU buffer and
479 * do this in the shaders. */
480 const int *orig_index_vert;
481 const int *orig_index_edge;
482};
483
484static bool draw_subdiv_topology_info_cb(const bke::subdiv::ForeachContext *foreach_context,
485 const int num_verts,
486 const int num_edges,
487 const int num_loops,
488 const int num_faces,
489 const int *subdiv_face_offset)
490{
491 /* num_loops does not take into account meshes with only loose geometry, which might be meshes
492 * used as custom bone shapes, so let's check the num_verts also. */
493 if (num_verts == 0 && num_loops == 0) {
494 return false;
495 }
496
497 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
498 DRWSubdivCache *cache = ctx->cache;
499
500 /* Set topology information only if we have loops. */
501 if (num_loops != 0) {
502 cache->num_subdiv_edges = uint(num_edges);
503 cache->num_subdiv_loops = uint(num_loops);
504 cache->num_subdiv_verts = uint(num_verts);
505 cache->num_subdiv_quads = uint(num_faces);
506 cache->subdiv_face_offset = static_cast<int *>(MEM_dupallocN(subdiv_face_offset));
507 }
508
509 cache->may_have_loose_geom = num_verts != 0 || num_edges != 0;
510
511 /* Initialize cache buffers, prefer dynamic usage so we can reuse memory on the host even after
512 * it was sent to the device, since we may use the data while building other buffers on the CPU
513 * side.
514 *
515 * These VBOs are created even when there are no faces and only loose geometry. This avoids the
516 * need for many null checks. Binding them must be avoided if they are empty though. */
517 cache->patch_coords = GPU_vertbuf_calloc();
519 *cache->patch_coords, get_blender_patch_coords_format(), GPU_USAGE_DYNAMIC);
520 GPU_vertbuf_data_alloc(*cache->patch_coords, cache->num_subdiv_loops);
521
522 cache->corner_patch_coords = GPU_vertbuf_calloc();
524 *cache->corner_patch_coords, get_blender_patch_coords_format(), GPU_USAGE_DYNAMIC);
525 GPU_vertbuf_data_alloc(*cache->corner_patch_coords, cache->num_subdiv_loops);
526
527 cache->verts_orig_index = GPU_vertbuf_calloc();
529 *cache->verts_orig_index, get_origindex_format(), GPU_USAGE_DYNAMIC);
530 GPU_vertbuf_data_alloc(*cache->verts_orig_index, cache->num_subdiv_loops);
531
532 cache->edges_orig_index = GPU_vertbuf_calloc();
534 *cache->edges_orig_index, get_origindex_format(), GPU_USAGE_DYNAMIC);
535 GPU_vertbuf_data_alloc(*cache->edges_orig_index, cache->num_subdiv_loops);
536
537 cache->edges_draw_flag = GPU_vertbuf_calloc();
539 *cache->edges_draw_flag, get_origindex_format(), GPU_USAGE_DYNAMIC);
540 GPU_vertbuf_data_alloc(*cache->edges_draw_flag, cache->num_subdiv_loops);
541
542 cache->subdiv_loop_subdiv_vert_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
543 "subdiv_loop_subdiv_vert_index");
544
545 cache->subdiv_loop_subdiv_edge_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
546 "subdiv_loop_subdiv_edge_index");
547
548 cache->subdiv_loop_face_index = MEM_malloc_arrayN<int>(cache->num_subdiv_loops,
549 "subdiv_loop_face_index");
550
551 /* Initialize context pointers and temporary buffers. */
552 ctx->patch_coords = cache->patch_coords->data<CompressedPatchCoord>().data();
553 ctx->subdiv_loop_vert_index = cache->verts_orig_index->data<int>().data();
554 ctx->subdiv_loop_edge_index = cache->edges_orig_index->data<int>().data();
555 ctx->subdiv_loop_edge_draw_flag = cache->edges_draw_flag->data<int>().data();
556 ctx->subdiv_loop_subdiv_vert_index = cache->subdiv_loop_subdiv_vert_index;
557 ctx->subdiv_loop_subdiv_edge_index = cache->subdiv_loop_subdiv_edge_index;
558 ctx->subdiv_loop_face_index = cache->subdiv_loop_face_index;
559
560 ctx->orig_index_vert = static_cast<const int *>(
561 CustomData_get_layer(&ctx->coarse_mesh->vert_data, CD_ORIGINDEX));
562
563 ctx->orig_index_edge = static_cast<const int *>(
564 CustomData_get_layer(&ctx->coarse_mesh->edge_data, CD_ORIGINDEX));
565
566 if (cache->num_subdiv_verts) {
567 ctx->vert_origindex_map = MEM_malloc_arrayN<int>(cache->num_subdiv_verts,
568 "subdiv_vert_origindex_map");
569 for (int i = 0; i < num_verts; i++) {
570 ctx->vert_origindex_map[i] = -1;
571 }
572 }
573
574 if (cache->num_subdiv_edges) {
575 ctx->edge_origindex_map = MEM_malloc_arrayN<int>(cache->num_subdiv_edges,
576 "subdiv_edge_origindex_map");
577 for (int i = 0; i < num_edges; i++) {
578 ctx->edge_origindex_map[i] = -1;
579 }
580 ctx->edge_draw_flag_map = MEM_calloc_arrayN<int>(cache->num_subdiv_edges,
581 "subdiv_edge_draw_flag_map");
582 }
583
584 return true;
585}
586
587static void draw_subdiv_vertex_corner_cb(const bke::subdiv::ForeachContext *foreach_context,
588 void * /*tls*/,
589 const int /*ptex_face_index*/,
590 const float /*u*/,
591 const float /*v*/,
592 const int coarse_vertex_index,
593 const int /*coarse_face_index*/,
594 const int /*coarse_corner*/,
595 const int subdiv_vertex_index)
596{
597 BLI_assert(coarse_vertex_index != ORIGINDEX_NONE);
598 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
599 ctx->vert_origindex_map[subdiv_vertex_index] = coarse_vertex_index;
600}
601
602static void draw_subdiv_vertex_edge_cb(const bke::subdiv::ForeachContext * /*foreach_context*/,
603 void * /*tls_v*/,
604 const int /*ptex_face_index*/,
605 const float /*u*/,
606 const float /*v*/,
607 const int /*coarse_edge_index*/,
608 const int /*coarse_face_index*/,
609 const int /*coarse_corner*/,
610 const int /*subdiv_vertex_index*/)
611{
612 /* Required if bke::subdiv::ForeachContext.vertex_corner is also set. */
613}
614
615static void draw_subdiv_edge_cb(const bke::subdiv::ForeachContext *foreach_context,
616 void * /*tls*/,
617 const int coarse_edge_index,
618 const int subdiv_edge_index,
619 const bool /*is_loose*/,
620 const int /*subdiv_v1*/,
621 const int /*subdiv_v2*/)
622{
623 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
624
625 if (!ctx->edge_origindex_map) {
626 return;
627 }
628
629 if (coarse_edge_index == ORIGINDEX_NONE) {
630 /* Not mapped to edge in the subdivision base mesh. */
631 ctx->edge_origindex_map[subdiv_edge_index] = ORIGINDEX_NONE;
632 if (!ctx->cache->optimal_display) {
633 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
634 }
635 }
636 else {
637 if (ctx->orig_index_edge) {
638 const int origindex = ctx->orig_index_edge[coarse_edge_index];
639 ctx->edge_origindex_map[subdiv_edge_index] = origindex;
640 if (!(origindex == ORIGINDEX_NONE && ctx->cache->hide_unmapped_edges)) {
641 /* Not mapped to edge in original mesh (generated by a preceding modifier). */
642 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
643 }
644 }
645 else {
646 ctx->edge_origindex_map[subdiv_edge_index] = coarse_edge_index;
647 ctx->edge_draw_flag_map[subdiv_edge_index] = 1;
648 }
649 }
650}
651
652static void draw_subdiv_loop_cb(const bke::subdiv::ForeachContext *foreach_context,
653 void * /*tls_v*/,
654 const int ptex_face_index,
655 const float u,
656 const float v,
657 const int /*coarse_loop_index*/,
658 const int coarse_face_index,
659 const int /*coarse_corner*/,
660 const int subdiv_loop_index,
661 const int subdiv_vertex_index,
662 const int subdiv_edge_index)
663{
664 DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
665 ctx->patch_coords[subdiv_loop_index] = make_patch_coord(ptex_face_index, u, v);
666
667 int coarse_vertex_index = ctx->vert_origindex_map[subdiv_vertex_index];
668
669 ctx->subdiv_loop_subdiv_vert_index[subdiv_loop_index] = subdiv_vertex_index;
670 ctx->subdiv_loop_subdiv_edge_index[subdiv_loop_index] = subdiv_edge_index;
671 ctx->subdiv_loop_face_index[subdiv_loop_index] = coarse_face_index;
672 ctx->subdiv_loop_vert_index[subdiv_loop_index] = coarse_vertex_index;
673}
674
675static void draw_subdiv_foreach_callbacks(bke::subdiv::ForeachContext *foreach_context)
676{
677 memset(foreach_context, 0, sizeof(*foreach_context));
678 foreach_context->topology_info = draw_subdiv_topology_info_cb;
679 foreach_context->loop = draw_subdiv_loop_cb;
680 foreach_context->edge = draw_subdiv_edge_cb;
681 foreach_context->vertex_corner = draw_subdiv_vertex_corner_cb;
682 foreach_context->vertex_edge = draw_subdiv_vertex_edge_cb;
683}
684
685static void do_subdiv_traversal(DRWCacheBuildingContext *cache_building_context,
686 bke::subdiv::Subdiv *subdiv)
687{
688 bke::subdiv::ForeachContext foreach_context;
689 draw_subdiv_foreach_callbacks(&foreach_context);
690 foreach_context.user_data = cache_building_context;
691
693 &foreach_context,
694 cache_building_context->settings,
695 cache_building_context->coarse_mesh);
696
697 /* Now that traversal is done, we can set up the right original indices for the
698 * subdiv-loop-to-coarse-edge map.
699 */
700 for (int i = 0; i < cache_building_context->cache->num_subdiv_loops; i++) {
701 const int edge_index = cache_building_context->subdiv_loop_subdiv_edge_index[i];
702 cache_building_context->subdiv_loop_edge_index[i] =
703 cache_building_context->edge_origindex_map[edge_index];
704 cache_building_context->subdiv_loop_edge_draw_flag[i] =
705 cache_building_context->edge_draw_flag_map[edge_index];
706 }
707}
708
709static gpu::VertBuf *gpu_vertbuf_create_from_format(const GPUVertFormat &format, uint len)
710{
711 gpu::VertBuf *verts = GPU_vertbuf_calloc();
714 return verts;
715}
716
717/* Build maps to hold enough information to tell which face is adjacent to which vertex; those will
718 * be used for computing normals if limit surfaces are unavailable. */
719static void build_vertex_face_adjacency_maps(DRWSubdivCache &cache)
720{
721 /* +1 so that we do not require a special case for the last vertex, this extra offset will
722 * contain the total number of adjacent faces. */
723 cache.subdiv_vertex_face_adjacency_offsets = gpu_vertbuf_create_from_format(
724 get_origindex_format(), cache.num_subdiv_verts + 1);
725
726 MutableSpan<int> vertex_offsets = cache.subdiv_vertex_face_adjacency_offsets->data<int>();
727 vertex_offsets.fill(0);
728
730 {cache.subdiv_loop_subdiv_vert_index, cache.num_subdiv_loops}, vertex_offsets);
731
732 cache.subdiv_vertex_face_adjacency = gpu_vertbuf_create_from_format(get_origindex_format(),
733 cache.num_subdiv_loops);
734 MutableSpan<int> adjacent_faces = cache.subdiv_vertex_face_adjacency->data<int>();
735 int *tmp_set_faces = MEM_calloc_arrayN<int>(cache.num_subdiv_verts, "tmp subdiv vertex offset");
736
737 for (int i = 0; i < cache.num_subdiv_loops / 4; i++) {
738 for (int j = 0; j < 4; j++) {
739 const int subdiv_vertex = cache.subdiv_loop_subdiv_vert_index[i * 4 + j];
740 int first_face_offset = vertex_offsets[subdiv_vertex] + tmp_set_faces[subdiv_vertex];
741 adjacent_faces[first_face_offset] = i;
742 tmp_set_faces[subdiv_vertex] += 1;
743 }
744 }
745
746 MEM_freeN(tmp_set_faces);
747}
748
749static bool draw_subdiv_build_cache(DRWSubdivCache &cache,
750 bke::subdiv::Subdiv *subdiv,
751 const Mesh *mesh_eval,
752 const SubsurfRuntimeData *runtime_data)
753{
754 bke::subdiv::ToMeshSettings to_mesh_settings;
755 to_mesh_settings.resolution = runtime_data->resolution;
756 to_mesh_settings.use_optimal_display = false;
757
758 if (cache.resolution != to_mesh_settings.resolution) {
759 /* Resolution changed, we need to rebuild, free any existing cached data. */
761 }
762
763 /* If the resolution between the cache and the settings match for some reason, check if the patch
764 * coordinates were not already generated. Those coordinates are specific to the resolution, so
765 * they should be null either after initialization, or after freeing if the resolution (or some
766 * other subdivision setting) changed.
767 */
768 if (cache.patch_coords != nullptr) {
769 return true;
770 }
771
772 DRWCacheBuildingContext cache_building_context;
773 memset(&cache_building_context, 0, sizeof(DRWCacheBuildingContext));
774 cache_building_context.coarse_mesh = mesh_eval;
775 cache_building_context.settings = &to_mesh_settings;
776 cache_building_context.cache = &cache;
777
778 do_subdiv_traversal(&cache_building_context, subdiv);
779 if (cache.num_subdiv_loops == 0 && cache.num_subdiv_verts == 0 && !cache.may_have_loose_geom) {
780 /* Either the traversal failed, or we have an empty mesh, either way we cannot go any further.
781 * The subdiv_face_offset cannot then be reliably stored in the cache, so free it directly.
782 */
783 MEM_SAFE_FREE(cache.subdiv_face_offset);
784 return false;
785 }
786
787 /* Only build face related data if we have polygons. */
788 const OffsetIndices faces = mesh_eval->faces();
789 if (cache.num_subdiv_loops != 0) {
790 /* Build buffers for the PatchMap. */
791 draw_patch_map_build(&cache.gpu_patch_map, subdiv);
792
793 cache.face_ptex_offset = bke::subdiv::face_ptex_offset_get(subdiv);
794
795 /* Build patch coordinates for all the face dots. */
796 cache.fdots_patch_coords = gpu_vertbuf_create_from_format(get_blender_patch_coords_format(),
797 mesh_eval->faces_num);
798 CompressedPatchCoord *blender_fdots_patch_coords =
799 cache.fdots_patch_coords->data<CompressedPatchCoord>().data();
800 for (int i = 0; i < mesh_eval->faces_num; i++) {
801 const int ptex_face_index = cache.face_ptex_offset[i];
802 if (faces[i].size() == 4) {
803 /* For quads, the center coordinate of the coarse face has `u = v = 0.5`. */
804 blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 0.5f, 0.5f);
805 }
806 else {
807 /* For N-gons, since they are split into quads from the center, and since the center is
808 * chosen to be the top right corner of each quad, the center coordinate of the coarse face
809 * is any one of those top right corners with `u = v = 1.0`. */
810 blender_fdots_patch_coords[i] = make_patch_coord(ptex_face_index, 1.0f, 1.0f);
811 }
812 }
813
814 cache.subdiv_face_offset_buffer = draw_subdiv_build_origindex_buffer(cache.subdiv_face_offset,
815 faces.size());
816
817 cache.face_ptex_offset_buffer = draw_subdiv_build_origindex_buffer(cache.face_ptex_offset,
818 faces.size() + 1);
819
820 build_vertex_face_adjacency_maps(cache);
821 }
822
823 cache.resolution = to_mesh_settings.resolution;
824 cache.num_coarse_faces = faces.size();
825
826 /* To avoid floating point precision issues when evaluating patches at patch boundaries,
827 * ensure that all loops sharing a vertex use the same patch coordinate. This could cause
828 * the mesh to not be watertight, leading to shadowing artifacts (see #97877). */
829 Vector<int> first_loop_index(cache.num_subdiv_verts, -1);
830
831 /* Save coordinates for corners, as attributes may vary for each loop connected to the same
832 * vertex. */
833 if (cache.num_subdiv_loops > 0) {
834 memcpy(cache.corner_patch_coords->data<CompressedPatchCoord>().data(),
835 cache_building_context.patch_coords,
836 sizeof(CompressedPatchCoord) * cache.num_subdiv_loops);
837
838 for (int i = 0; i < cache.num_subdiv_loops; i++) {
839 const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
840 if (first_loop_index[vertex] != -1) {
841 continue;
842 }
843 first_loop_index[vertex] = i;
844 }
845
846 for (int i = 0; i < cache.num_subdiv_loops; i++) {
847 const int vertex = cache_building_context.subdiv_loop_subdiv_vert_index[i];
848 cache_building_context.patch_coords[i] =
849 cache_building_context.patch_coords[first_loop_index[vertex]];
850 }
851 }
852
853 /* Cleanup. */
854 MEM_SAFE_FREE(cache_building_context.vert_origindex_map);
855 MEM_SAFE_FREE(cache_building_context.edge_origindex_map);
856 MEM_SAFE_FREE(cache_building_context.edge_draw_flag_map);
857
858 return true;
859}
860
861#endif
862
864
865/* -------------------------------------------------------------------- */
870
873 const int src_offset,
874 const int dst_offset,
875 const uint total_dispatch_size,
876 const bool has_sculpt_mask,
877 const uint edge_loose_offset)
878{
879 ubo->src_offset = src_offset;
880 ubo->dst_offset = dst_offset;
883 ubo->max_depth = cache.gpu_patch_map.max_depth;
887 ubo->edge_loose_offset = edge_loose_offset;
888 ubo->has_sculpt_mask = has_sculpt_mask;
894 ubo->total_dispatch_size = total_dispatch_size;
895 ubo->is_edit_mode = cache.is_edit_mode;
896 ubo->use_hide = cache.use_hide;
897}
898
900 const int src_offset,
901 const int dst_offset,
902 const uint total_dispatch_size,
903 const bool has_sculpt_mask = false,
904 const uint edge_loose_offset = 0)
905{
906 DRWSubdivUboStorage storage;
908 &storage,
909 src_offset,
910 dst_offset,
911 total_dispatch_size,
912 has_sculpt_mask,
913 edge_loose_offset);
914
915 if (!cache.ubo) {
916 const_cast<DRWSubdivCache *>(&cache)->ubo = GPU_uniformbuf_create_ex(
917 sizeof(DRWSubdivUboStorage), &storage, "DRWSubdivUboStorage");
918 }
919
920 GPU_uniformbuf_update(cache.ubo, &storage);
922}
923
925
926// --------------------------------------------------------
927
928#define SUBDIV_LOCAL_WORK_GROUP_SIZE 64
930{
932}
933
940 GPUShader *shader,
941 const int src_offset,
942 const int dst_offset,
943 uint total_dispatch_size,
944 const bool has_sculpt_mask = false,
945 const uint edge_loose_offset = 0)
946{
947 const uint max_res_x = uint(GPU_max_work_group_count(0));
948
949 const uint dispatch_size = get_dispatch_size(total_dispatch_size);
950 uint dispatch_rx = dispatch_size;
951 uint dispatch_ry = 1u;
952 if (dispatch_rx > max_res_x) {
953 /* Since there are some limitations with regards to the maximum work group size (could be as
954 * low as 64k elements per call), we split the number elements into a "2d" number, with the
955 * final index being computed as `res_x + res_y * max_work_group_size`. Even with a maximum
956 * work group size of 64k, that still leaves us with roughly `64k * 64k = 4` billion elements
957 * total, which should be enough. If not, we could also use the 3rd dimension. */
958 /* TODO(fclem): We could dispatch fewer groups if we compute the prime factorization and
959 * get the smallest rect fitting the requirements. */
960 dispatch_rx = dispatch_ry = ceilf(sqrtf(dispatch_size));
961 /* Avoid a completely empty dispatch line caused by rounding. */
962 if ((dispatch_rx * (dispatch_ry - 1)) >= dispatch_size) {
963 dispatch_ry -= 1;
964 }
965 }
966
967 /* X and Y dimensions may have different limits so the above computation may not be right, but
968 * even with the standard 64k minimum on all dimensions we still have a lot of room. Therefore,
969 * we presume it all fits. */
970 BLI_assert(dispatch_ry < uint(GPU_max_work_group_count(1)));
971
973 cache, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask, edge_loose_offset);
974
975 GPU_compute_dispatch(shader, dispatch_rx, dispatch_ry, 1);
976}
977
979 gpu::VertBuf *flags_buffer,
980 gpu::VertBuf *pos_nor,
981 gpu::VertBuf *orco)
982{
983#ifdef WITH_OPENSUBDIV
985 /* Happens on meshes with only loose geometry. */
986 return;
987 }
988
989 bke::subdiv::Subdiv *subdiv = cache.subdiv;
990 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
991
992 gpu::VertBuf *src_buffer = evaluator->eval_output->get_source_buf();
993 gpu::VertBuf *src_extra_buffer = nullptr;
994 if (orco) {
995 src_extra_buffer = evaluator->eval_output->get_source_data_buf();
996 }
997
998 GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
999 GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
1000 GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
1001
1004 GPU_shader_bind(shader);
1005
1017 if (flags_buffer) {
1019 }
1021 if (orco) {
1022 GPU_vertbuf_bind_as_ssbo(src_extra_buffer,
1025 }
1026
1027 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1028
1029 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1030 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1031 * needed. */
1033
1034 /* Cleanup. */
1036
1037 GPU_storagebuf_free(patch_arrays_buffer);
1038#else
1039 UNUSED_VARS(cache, flags_buffer, pos_nor, orco);
1040#endif
1041}
1042
1044 gpu::VertBuf *uvs,
1045 const int face_varying_channel,
1046 const int dst_offset)
1047{
1048#ifdef WITH_OPENSUBDIV
1050 /* Happens on meshes with only loose geometry. */
1051 return;
1052 }
1053
1054 bke::subdiv::Subdiv *subdiv = cache.subdiv;
1055 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1056
1057 gpu::VertBuf *src_buffer = evaluator->eval_output->get_face_varying_source_buf(
1058 face_varying_channel);
1059 int src_buffer_offset = evaluator->eval_output->get_face_varying_source_offset(
1060 face_varying_channel);
1061
1062 GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_face_varying_patch_array_buf(
1063 face_varying_channel);
1064 GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_face_varying_patch_index_buf(
1065 face_varying_channel);
1066 GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_face_varying_patch_param_buf(
1067 face_varying_channel);
1068
1070 GPU_shader_bind(shader);
1071
1084
1085 /* The buffer offset has the stride baked in (which is 2 as we have UVs) so remove the stride by
1086 * dividing by 2 */
1088 cache, shader, src_buffer_offset / 2, dst_offset, cache.num_subdiv_quads);
1089
1090 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1091 * Since it may also be used for computing UV stretches, we also need a barrier on the shader
1092 * storage. */
1094
1095 /* Cleanup. */
1097
1098 GPU_storagebuf_free(patch_arrays_buffer);
1099#else
1100 UNUSED_VARS(cache, uvs, face_varying_channel, dst_offset);
1101#endif
1102}
1103
1105 gpu::VertBuf &src_data,
1106 gpu::VertBuf &dst_data,
1107 GPUVertCompType comp_type,
1108 int dimensions,
1109 int dst_offset)
1110{
1112 /* Happens on meshes with only loose geometry. */
1113 return;
1114 }
1115
1116 GPUShader *shader = DRW_shader_subdiv_custom_data_get(comp_type, dimensions);
1117 GPU_shader_bind(shader);
1118
1119 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1127
1128 drw_subdiv_compute_dispatch(cache, shader, 0, dst_offset, cache.num_subdiv_quads);
1129
1130 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. Put
1131 * a barrier on the shader storage as we may use the result in another compute shader. */
1133
1134 /* Cleanup. */
1136}
1137
1139 gpu::VertBuf *mask_vbo,
1140 gpu::VertBuf *face_set_vbo,
1141 gpu::VertBuf *sculpt_data)
1142{
1144 GPU_shader_bind(shader);
1145
1146 /* Mask VBO is always at binding point 0. */
1147 if (mask_vbo) {
1149 }
1152
1153 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads, mask_vbo != nullptr);
1154
1155 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1157
1158 /* Cleanup. */
1160}
1161
1163 gpu::VertBuf *pos_nor,
1164 gpu::VertBuf *face_adjacency_offsets,
1165 gpu::VertBuf *face_adjacency_lists,
1166 gpu::VertBuf *vertex_loop_map,
1167 gpu::VertBuf *vert_normals)
1168{
1170 GPU_shader_bind(shader);
1171
1173 GPU_vertbuf_bind_as_ssbo(face_adjacency_offsets,
1178
1179 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_verts);
1180
1181 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1182 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1183 * needed. */
1185
1186 /* Cleanup. */
1188}
1189
1191 gpu::VertBuf *vert_normals,
1192 gpu::VertBuf *subdiv_loop_subdiv_vert_index,
1193 gpu::VertBuf *pos_nor)
1194{
1196 GPU_shader_bind(shader);
1197
1199 GPU_vertbuf_bind_as_ssbo(subdiv_loop_subdiv_vert_index,
1202
1203 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1204
1205 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1206 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1207 * needed. */
1209
1210 /* Cleanup. */
1212}
1213
1215 gpu::VertBuf *src_custom_normals,
1216 gpu::VertBuf *pos_nor)
1217{
1219 GPU_shader_bind(shader);
1220
1221 int binding_point = 0;
1222 GPU_vertbuf_bind_as_ssbo(src_custom_normals, binding_point++);
1223 /* outputPosNor is bound at index 2 in the base shader. */
1224 binding_point = 2;
1225 GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
1226 BLI_assert(binding_point <= MAX_GPU_SUBDIV_SSBOS);
1227
1228 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1229
1230 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
1231 * We also need it for subsequent compute shaders, so a barrier on the shader storage is also
1232 * needed. */
1234
1235 /* Cleanup. */
1237}
1238
1240 gpu::IndexBuf *subdiv_tris,
1241 const int material_count)
1242{
1244 /* Happens on meshes with only loose geometry. */
1245 return;
1246 }
1247
1248 const bool do_single_material = material_count <= 1;
1249
1250 GPUShader *shader = DRW_shader_subdiv_get(do_single_material ?
1253 GPU_shader_bind(shader);
1254
1255 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1258 if (!do_single_material) {
1260 }
1261
1262 /* Outputs */
1264
1265 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1266
1267 /* This generates an index buffer, so we need to put a barrier on the element array. */
1269
1270 /* Cleanup. */
1272}
1273
1275 gpu::VertBuf *fdots_pos,
1276 gpu::VertBuf *fdots_nor,
1277 gpu::IndexBuf *fdots_indices)
1278{
1279#ifdef WITH_OPENSUBDIV
1281 /* Happens on meshes with only loose geometry. */
1282 return;
1283 }
1284
1285 bke::subdiv::Subdiv *subdiv = cache.subdiv;
1286 OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
1287
1288 gpu::VertBuf *src_buffer = evaluator->eval_output->get_source_buf();
1289 GPUStorageBuf *patch_arrays_buffer = evaluator->eval_output->create_patch_arrays_buf();
1290 GPUStorageBuf *patch_index_buffer = evaluator->eval_output->get_patch_index_buf();
1291 GPUStorageBuf *patch_param_buffer = evaluator->eval_output->get_patch_param_buf();
1292
1293 GPUShader *shader = DRW_shader_subdiv_get(
1296 GPU_shader_bind(shader);
1297
1310 /* F-dots normals may not be requested, still reserve the binding point. */
1311 if (fdots_nor) {
1313 }
1317
1318 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_coarse_faces);
1319
1320 /* This generates two vertex buffers and an index buffer, so we need to put a barrier on the
1321 * vertex attributes and element arrays. */
1323
1324 /* Cleanup. */
1326
1327 GPU_storagebuf_free(patch_arrays_buffer);
1328#else
1329 UNUSED_VARS(cache, fdots_pos, fdots_nor, fdots_indices);
1330#endif
1331}
1332
1334{
1336 GPU_shader_bind(shader);
1337
1342
1343 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1344
1345 /* This generates an index buffer, so we need to put a barrier on the element array. */
1347
1348 /* Cleanup. */
1350}
1351
1353 gpu::IndexBuf *lines_indices,
1354 gpu::VertBuf *lines_flags,
1355 uint edge_loose_offset,
1356 uint num_loose_edges)
1357{
1359 GPU_shader_bind(shader);
1360
1363
1364 drw_subdiv_compute_dispatch(cache, shader, 0, 0, num_loose_edges, false, edge_loose_offset);
1365
1366 /* This generates an index buffer, so we need to put a barrier on the element array. */
1368
1369 /* Cleanup. */
1371}
1372
1374 gpu::VertBuf *pos_nor,
1375 gpu::VertBuf *edge_draw_flag,
1376 gpu::VertBuf *poly_other_map,
1377 gpu::VertBuf *edge_fac)
1378{
1380 GPU_shader_bind(shader);
1381
1386
1387 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1388
1389 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1391
1392 /* Cleanup. */
1394}
1395
1397 gpu::VertBuf *pos_nor,
1398 gpu::VertBuf *lnor)
1399{
1401 /* Happens on meshes with only loose geometry. */
1402 return;
1403 }
1404
1406 GPU_shader_bind(shader);
1407
1408 /* Inputs */
1414
1415 /* Outputs */
1417
1418 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1419
1420 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1422
1423 /* Cleanup. */
1425}
1426
1428 gpu::VertBuf *coarse_data,
1429 gpu::VertBuf *subdiv_data)
1430{
1432 GPU_shader_bind(shader);
1433
1434 /* Inputs */
1435 /* subdiv_face_offset is always at binding point 0 for each shader using it. */
1438 /* Outputs */
1440
1441 drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache.num_subdiv_quads);
1442
1443 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1445
1446 /* Cleanup. */
1448}
1449
1451 gpu::VertBuf *pos_nor,
1452 gpu::VertBuf *uvs,
1453 int uvs_offset,
1454 gpu::VertBuf *stretch_angles)
1455{
1457 GPU_shader_bind(shader);
1458
1459 /* Inputs */
1462 /* Outputs */
1464
1465 drw_subdiv_compute_dispatch(cache, shader, uvs_offset, 0, cache.num_subdiv_quads);
1466
1467 /* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
1469
1470 /* Cleanup. */
1472}
1473
1474/* -------------------------------------------------------------------- */
1475
1510 const Mesh *mesh_eval,
1511 uint mat_len)
1512{
1514
1515 const int number_of_quads = cache.num_subdiv_loops / 4;
1516
1517 if (mat_len == 1) {
1518 cache.mat_start = MEM_callocN<int>("subdiv mat_end");
1519 cache.mat_end = MEM_callocN<int>("subdiv mat_end");
1520 cache.mat_start[0] = 0;
1521 cache.mat_end[0] = number_of_quads;
1522 return;
1523 }
1524
1525 const bke::AttributeAccessor attributes = mesh_eval->attributes();
1526 const VArraySpan<int> material_indices = *attributes.lookup_or_default<int>(
1527 "material_index", bke::AttrDomain::Face, 0);
1528
1529 /* Count number of subdivided polygons for each material. */
1530 int *mat_start = MEM_calloc_arrayN<int>(mat_len, "subdiv mat_start");
1531 int *subdiv_face_offset = cache.subdiv_face_offset;
1532
1533 /* TODO: parallel_reduce? */
1534 for (int i = 0; i < mesh_eval->faces_num; i++) {
1535 const int next_offset = (i == mesh_eval->faces_num - 1) ? number_of_quads :
1536 subdiv_face_offset[i + 1];
1537 const int quad_count = next_offset - subdiv_face_offset[i];
1538 const uint mat_index = uint(material_indices[i]) < mat_len ? uint(material_indices[i]) : 0;
1539 mat_start[mat_index] += quad_count;
1540 }
1541
1542 /* Accumulate offsets. */
1543 int ofs = mat_start[0];
1544 mat_start[0] = 0;
1545 for (uint i = 1; i < mat_len; i++) {
1546 int tmp = mat_start[i];
1547 mat_start[i] = ofs;
1548 ofs += tmp;
1549 }
1550
1551 /* Compute per face offsets. */
1552 int *mat_end = static_cast<int *>(MEM_dupallocN(mat_start));
1553 int *per_face_mat_offset = MEM_malloc_arrayN<int>(mesh_eval->faces_num, "per_face_mat_offset");
1554
1555 for (int i = 0; i < mesh_eval->faces_num; i++) {
1556 const uint mat_index = uint(material_indices[i]) < mat_len ? uint(material_indices[i]) : 0;
1557 const int single_material_index = subdiv_face_offset[i];
1558 const int material_offset = mat_end[mat_index];
1559 const int next_offset = (i == mesh_eval->faces_num - 1) ? number_of_quads :
1560 subdiv_face_offset[i + 1];
1561 const int quad_count = next_offset - subdiv_face_offset[i];
1562 mat_end[mat_index] += quad_count;
1563
1564 per_face_mat_offset[i] = material_offset - single_material_index;
1565 }
1566
1567 cache.face_mat_offset = draw_subdiv_build_origindex_buffer(per_face_mat_offset,
1568 mesh_eval->faces_num);
1569 cache.mat_start = mat_start;
1570 cache.mat_end = mat_end;
1571
1572 MEM_freeN(per_face_mat_offset);
1573}
1574
1582/* The evaluator cache is global, so we cannot allow concurrent usage and need synchronization. */
1584
1586 Mesh &mesh,
1587 MeshBatchCache &batch_cache,
1588 MeshBufferCache &mbc,
1589 const Span<IBOType> ibo_requests,
1590 const Span<VBOType> vbo_requests,
1591 const bool is_editmode,
1592 const bool is_paint_mode,
1593 const bool do_final,
1594 const bool do_uvedit,
1595 const bool do_cage,
1596 const ToolSettings *ts,
1597 const bool use_hide)
1598{
1599 SubsurfRuntimeData *runtime_data = mesh.runtime->subsurf_runtime_data;
1600 BLI_assert(runtime_data && runtime_data->has_gpu_subdiv);
1601
1602 if (runtime_data->settings.level == 0) {
1603 return false;
1604 }
1605
1606 const Mesh *mesh_eval = &mesh;
1607 BMesh *bm = nullptr;
1608 if (mesh.runtime->edit_mesh) {
1609 mesh_eval = BKE_object_get_editmesh_eval_final(&ob);
1610 bm = mesh.runtime->edit_mesh->bm;
1611 }
1612
1613#ifdef WITH_OPENSUBDIV
1614 draw_subdiv_invalidate_evaluator_for_orco(runtime_data->subdiv_gpu, mesh_eval);
1615#endif
1616
1618 runtime_data, mesh_eval, true);
1619 if (!subdiv) {
1620 return false;
1621 }
1622
1623 /* Lock the entire evaluation to avoid concurrent usage of shader objects in evaluator cache. */
1624 std::scoped_lock lock(g_subdiv_eval_mutex);
1625
1626 if (g_subdiv_evaluator_cache == nullptr) {
1628 }
1629
1630 /* Increment evaluator cache reference if an evaluator has been assigned to it. */
1631 bool evaluator_might_be_assigned = subdiv->evaluator == nullptr;
1632 auto maybe_increment_cache_ref = [evaluator_might_be_assigned](bke::subdiv::Subdiv *subdiv) {
1633 if (evaluator_might_be_assigned && subdiv->evaluator != nullptr) {
1634 /* An evaluator was assigned. */
1636 }
1637 };
1638
1641 {
1642 /* This could happen in two situations:
1643 * - OpenSubdiv is disabled.
1644 * - Something totally bad happened, and OpenSubdiv rejected our
1645 * topology.
1646 * In either way, we can't safely continue. However, we still have to handle potential loose
1647 * geometry, which is done separately. */
1648 if (mesh_eval->faces_num) {
1649 maybe_increment_cache_ref(subdiv);
1650 return false;
1651 }
1652 }
1653
1654 DRWSubdivCache &draw_cache = mesh_batch_cache_ensure_subdiv_cache(batch_cache);
1655
1656 draw_cache.optimal_display = runtime_data->use_optimal_display;
1657 /* If there is no distinct cage, hide unmapped edges that can't be selected. */
1658 draw_cache.hide_unmapped_edges = is_editmode && !do_cage;
1659 draw_cache.bm = bm;
1660 draw_cache.mesh = mesh_eval;
1661 draw_cache.subdiv = subdiv;
1662
1663#ifdef WITH_OPENSUBDIV
1664 if (!draw_subdiv_build_cache(draw_cache, subdiv, mesh_eval, runtime_data)) {
1665 maybe_increment_cache_ref(subdiv);
1666 return false;
1667 }
1668#endif
1669
1671
1672 /* Copy topology information for stats display. */
1673 runtime_data->stats_totvert = draw_cache.num_subdiv_verts;
1674 runtime_data->stats_totedge = draw_cache.num_subdiv_edges;
1675 runtime_data->stats_faces_num = draw_cache.num_subdiv_quads;
1676 runtime_data->stats_totloop = draw_cache.num_subdiv_loops;
1677
1678 draw_cache.use_custom_loop_normals = (runtime_data->use_loop_normals) &&
1679 mesh_eval->attributes().contains("custom_normal");
1680
1681 if (ibo_requests.contains(IBOType::Tris)) {
1682 draw_subdiv_cache_ensure_mat_offsets(draw_cache, mesh_eval, batch_cache.mat_len);
1683 }
1684
1686 ob, mesh, is_editmode, is_paint_mode, do_final, do_uvedit, use_hide, ts);
1687 draw_cache.use_hide = use_hide;
1688
1689 /* Used for setting loop normals flags. Mapped extraction is only used during edit mode.
1690 * See comments in #extract_lnor_iter_face_mesh.
1691 */
1692 draw_cache.is_edit_mode = mr.edit_bmesh != nullptr;
1693
1694 draw_subdiv_cache_update_extra_coarse_face_data(draw_cache, mesh_eval, mr);
1695
1697 batch_cache, mbc, ibo_requests, vbo_requests, draw_cache, mr);
1698
1699 maybe_increment_cache_ref(subdiv);
1700 return true;
1701}
1702
1704{
1705 const Span<int> loose_edges = cache.loose_geom.edges;
1706 if (loose_edges.is_empty()) {
1707 return;
1708 }
1709
1710 if (!subdiv_cache.loose_edge_positions.is_empty()) {
1711 /* Already processed. */
1712 return;
1713 }
1714
1715 const Mesh *coarse_mesh = subdiv_cache.mesh;
1716 const bool is_simple = subdiv_cache.subdiv->settings.is_simple;
1717 const int resolution = subdiv_cache.resolution;
1718 const int resolution_1 = resolution - 1;
1719 const float inv_resolution_1 = 1.0f / float(resolution_1);
1720
1721 const Span<float3> coarse_positions = coarse_mesh->vert_positions();
1722 const Span<int2> coarse_edges = coarse_mesh->edges();
1723
1724 Array<int> vert_to_edge_offsets;
1725 Array<int> vert_to_edge_indices;
1726 const GroupedSpan<int> vert_to_edge_map = bke::mesh::build_vert_to_edge_map(
1727 coarse_edges, coarse_mesh->verts_num, vert_to_edge_offsets, vert_to_edge_indices);
1728
1729 /* Also store the last vertex to simplify copying the positions to the VBO. */
1730 subdiv_cache.loose_edge_positions.reinitialize(loose_edges.size() * resolution);
1731 MutableSpan<float3> edge_positions = subdiv_cache.loose_edge_positions;
1732
1733 threading::parallel_for(loose_edges.index_range(), 1024, [&](const IndexRange range) {
1734 for (const int i : range) {
1735 const int coarse_edge = loose_edges[i];
1736 MutableSpan positions = edge_positions.slice(i * resolution, resolution);
1737 for (const int j : positions.index_range()) {
1738 positions[j] = bke::subdiv::mesh_interpolate_position_on_edge(coarse_positions,
1739 coarse_edges,
1740 vert_to_edge_map,
1741 coarse_edge,
1742 is_simple,
1743 j * inv_resolution_1);
1744 }
1745 }
1746 });
1747}
1748
1758
1760 Mesh &mesh,
1761 MeshBatchCache &batch_cache,
1762 MeshBufferCache &mbc,
1763 const Span<IBOType> ibo_requests,
1764 const Span<VBOType> vbo_requests,
1765 const bool is_editmode,
1766 const bool is_paint_mode,
1767 const bool do_final,
1768 const bool do_uvedit,
1769 const bool do_cage,
1770 const ToolSettings *ts,
1771 const bool use_hide)
1772{
1773
1774#undef TIME_SUBDIV
1775
1776#ifdef TIME_SUBDIV
1777 const double begin_time = BLI_time_now_seconds();
1778#endif
1779
1781 mesh,
1782 batch_cache,
1783 mbc,
1784 ibo_requests,
1785 vbo_requests,
1786 is_editmode,
1787 is_paint_mode,
1788 do_final,
1789 do_uvedit,
1790 do_cage,
1791 ts,
1792 use_hide))
1793 {
1794 /* Did not run. */
1795 return;
1796 }
1797
1798#ifdef TIME_SUBDIV
1799 const double end_time = BLI_time_now_seconds();
1800 fprintf(stderr, "Time to update subdivision: %f\n", end_time - begin_time);
1801 fprintf(stderr, "Maximum FPS: %f\n", 1.0 / (end_time - begin_time));
1802#endif
1803}
1804
1810
1812{
1813 {
1814 std::scoped_lock lock(gpu_subdiv_queue_mutex);
1815
1816 while (gpu_subdiv_free_queue != nullptr) {
1817 bke::subdiv::Subdiv *subdiv = static_cast<bke::subdiv::Subdiv *>(
1819
1820 {
1821 std::scoped_lock lock(g_subdiv_eval_mutex);
1822 if (subdiv->evaluator != nullptr) {
1824 }
1825 }
1826#ifdef WITH_OPENSUBDIV
1827 /* Set the type to CPU so that we do actually free the cache. */
1829#endif
1830 bke::subdiv::free(subdiv);
1831 }
1832 }
1833
1834 {
1835 std::scoped_lock lock(g_subdiv_eval_mutex);
1836 /* Free evaluator cache if there is no more reference to it.. */
1837 if (g_subdiv_evaluator_users == 0) {
1839 g_subdiv_evaluator_cache = nullptr;
1840 }
1841 }
1842}
1843
1844} // namespace blender::draw
const void * CustomData_get_layer(const CustomData *data, eCustomDataType type)
#define ORIGINDEX_NONE
bool CustomData_has_layer(const CustomData *data, eCustomDataType type)
General operations, lookup, etc. for blender objects.
const Mesh * BKE_object_get_editmesh_eval_final(const Object *object)
#define MAX_GPU_SUBDIV_SSBOS
blender::bke::subdiv::Subdiv * BKE_subsurf_modifier_subdiv_descriptor_ensure(SubsurfRuntimeData *runtime_data, const Mesh *mesh, bool for_draw_code)
#define BLI_assert(a)
Definition BLI_assert.h:46
MINLINE uint divide_ceil_u(uint a, uint b)
#define MINLINE
unsigned int uint
double BLI_time_now_seconds(void)
Definition time.cc:65
#define UNUSED_VARS(...)
struct Mesh Mesh
Object is a sort of wrapper for general info.
int GPU_max_work_group_count(int index)
void GPU_compute_dispatch(GPUShader *shader, uint groups_x_len, uint groups_y_len, uint groups_z_len, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_indexbuf_bind_as_ssbo(blender::gpu::IndexBuf *elem, int binding)
void GPU_shader_bind(GPUShader *shader, const blender::gpu::shader::SpecializationConstants *constants_state=nullptr)
void GPU_shader_unbind()
void GPU_memory_barrier(eGPUBarrier barrier)
Definition gpu_state.cc:385
@ GPU_BARRIER_SHADER_STORAGE
Definition GPU_state.hh:48
@ GPU_BARRIER_ELEMENT_ARRAY
Definition GPU_state.hh:52
@ GPU_BARRIER_VERTEX_ATTRIB_ARRAY
Definition GPU_state.hh:50
void GPU_storagebuf_bind(GPUStorageBuf *ssbo, int slot)
void GPU_storagebuf_free(GPUStorageBuf *ssbo)
GPUUniformBuf * GPU_uniformbuf_create_ex(size_t size, const void *data, const char *name)
void GPU_uniformbuf_update(GPUUniformBuf *ubo, const void *data)
void GPU_uniformbuf_free(GPUUniformBuf *ubo)
void GPU_uniformbuf_bind(GPUUniformBuf *ubo, int slot)
blender::gpu::VertBuf * GPU_vertbuf_create_with_format_ex(const GPUVertFormat &format, GPUUsageType usage)
void GPU_vertbuf_tag_dirty(blender::gpu::VertBuf *verts)
#define GPU_vertbuf_init_with_format(verts, format)
#define GPU_VERTBUF_DISCARD_SAFE(verts)
blender::gpu::VertBuf * GPU_vertbuf_calloc()
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_bind_as_ssbo(blender::gpu::VertBuf *verts, int binding)
void GPU_vertbuf_init_with_format_ex(blender::gpu::VertBuf &verts, const GPUVertFormat &format, GPUUsageType)
@ GPU_USAGE_STATIC
@ GPU_USAGE_DYNAMIC
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT
void GPU_vertformat_alias_add(GPUVertFormat *, blender::StringRef alias)
uint GPU_vertformat_attr_add(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
GPUVertCompType
@ GPU_COMP_F32
@ GPU_COMP_I32
@ GPU_COMP_U32
volatile int lock
@ BM_ELEM_HIDDEN
@ BM_ELEM_SELECT
@ BM_ELEM_SMOOTH
#define BM_elem_index_get(ele)
#define BM_elem_flag_test(ele, hflag)
#define BM_ITER_MESH(ele, iter, bm, itype)
@ BM_FACES_OF_MESH
BMesh const char void * data
BMesh * bm
ATTR_WARN_UNUSED_RESULT const BMVert * v
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
AttributeSet attributes
constexpr void fill(const T &value) const
Definition BLI_span.hh:517
constexpr bool is_empty() const
Definition BLI_span.hh:260
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr IndexRange index_range() const
Definition BLI_span.hh:401
constexpr bool is_empty() const
Definition BLI_span.hh:260
constexpr bool contains(const T &value) const
Definition BLI_span.hh:277
GAttributeReader lookup_or_default(StringRef attribute_id, AttrDomain domain, eCustomDataType data_type, const void *default_value=nullptr) const
MutableSpan< T > data()
gpu::VertBuf * get_face_varying_source_buf(const int face_varying_channel)
GPUStorageBuf * create_face_varying_patch_array_buf(const int face_varying_channel)
GPUStorageBuf * get_face_varying_patch_param_buf(const int face_varying_channel)
GPUStorageBuf * get_face_varying_patch_index_buf(const int face_varying_channel)
int get_face_varying_source_offset(const int face_varying_channel) const
void getPatchMap(blender::gpu::VertBuf *patch_map_handles, blender::gpu::VertBuf *patch_map_quadtree, int *min_patch_face, int *max_patch_face, int *max_depth, int *patches_are_triangular)
#define ceilf(x)
#define sqrtf(x)
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN
#define SUBDIV_COARSE_FACE_FLAG_HIDDEN_MASK
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE_MASK
#define SUBDIV_COARSE_FACE_FLAG_OFFSET
#define SUBDIV_COARSE_FACE_FLAG_SMOOTH_MASK
#define SUBDIV_COARSE_FACE_FLAG_SMOOTH
#define SUBDIV_COARSE_FACE_FLAG_ACTIVE
#define SUBDIV_COARSE_FACE_FLAG_SELECT_MASK
#define SUBDIV_LOCAL_WORK_GROUP_SIZE
#define SUBDIV_COARSE_FACE_FLAG_SELECT
#define SUBDIV_COARSE_FACE_LOOP_START_MASK
GPUShader * DRW_shader_subdiv_custom_data_get(GPUVertCompType comp_type, int dimensions)
GPUShader * DRW_shader_subdiv_get(SubdivShaderType shader_type)
@ PATCH_EVALUATION_FACE_DOTS_WITH_NORMALS
#define LOOP_NORMALS_INPUT_VERT_ORIG_INDEX_BUF_SLOT
#define EDGE_FAC_EDGE_FAC_BUF_SLOT
#define LINES_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define TRIS_OUTPUT_TRIS_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_NORMALS_BUF_SLOT
#define LOOP_NORMALS_POS_NOR_BUF_SLOT
#define PATCH_EVALUATION_PATCH_ARRAY_BUFFER_BUF_SLOT
#define CUSTOM_DATA_SOURCE_DATA_BUF_SLOT
#define CUSTOM_DATA_FACE_PTEX_OFFSET_BUF_SLOT
#define LOOP_NORMALS_OUTPUT_LNOR_BUF_SLOT
#define LINES_OUTPUT_LINES_BUF_SLOT
#define PATCH_EVALUATION_INPUT_VERTEX_ORIG_INDEX_BUF_SLOT
#define NORMALS_ACCUMULATE_POS_NOR_BUF_SLOT
#define NORMALS_FINALIZE_VERTEX_LOOP_MAP_BUF_SLOT
#define SCULPT_DATA_SCULPT_DATA_BUF_SLOT
#define SHADER_DATA_BUF_SLOT
#define PATCH_EVALUATION_QUAD_NODES_BUF_SLOT
#define PATCH_EVALUATION_INPUT_PATCH_HANDLES_BUF_SLOT
#define PATCH_EVALUATION_SOURCE_EXTRA_VERTEX_BUFFER_BUF_SLOT
#define STRETCH_ANGLE_UV_STRETCHES_BUF_SLOT
#define NORMALS_ACCUMULATE_FACE_ADJACENCY_LISTS_BUF_SLOT
#define PATCH_EVALUATION_PATCH_PARAM_BUFFER_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_FDOTS_VERTEX_BUFFER_BUF_SLOT
#define LINES_LINES_LOOSE_FLAGS
#define EDGE_FAC_POLY_OTHER_MAP_BUF_SLOT
#define LINES_INPUT_EDGE_DRAW_FLAG_BUF_SLOT
#define TRIS_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define PATCH_EVALUATION_PATCH_COORDS_BUF_SLOT
#define PATCH_EVALUATION_PATCH_INDEX_BUFFER_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_INDICES_BUF_SLOT
#define NORMALS_ACCUMULATE_NORMALS_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_ORCOS_BUF_SLOT
#define SCULPT_DATA_SCULPT_FACE_SET_COLOR_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_VERTS_BUF_SLOT
#define EDGE_FAC_POS_NOR_BUF_SLOT
#define LOOP_NORMALS_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define SCULPT_DATA_SCULPT_MASK_BUF_SLOT
#define EDGE_FAC_EDGE_DRAW_FLAG_BUF_SLOT
#define PATCH_EVALUATION_SOURCE_VERTEX_BUFFER_BUF_SLOT
#define TRIS_FACE_MAT_OFFSET
#define STRETCH_AREA_COARSE_STRETCH_AREA_BUF_SLOT
#define NORMALS_FINALIZE_POS_NOR_BUF_SLOT
#define PATCH_EVALUATION_FLAGS_BUFFER_BUF_SLOT
#define CUSTOM_DATA_DESTINATION_DATA_BUF_SLOT
#define PATCH_EVALUATION_OUTPUT_FVAR_BUF_SLOT
#define SUBDIV_FACE_OFFSET_BUF_SLOT
#define CUSTOM_DATA_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define NORMALS_ACCUMULATE_VERTEX_LOOP_MAP_BUF_SLOT
#define STRETCH_AREA_SUBDIV_STRETCH_AREA_BUF_SLOT
#define PATCH_EVALUATION_EXTRA_COARSE_FACE_DATA_BUF_SLOT
#define NORMALS_ACCUMULATE_FACE_ADJACENCY_OFFSETS_BUF_SLOT
#define NORMALS_FINALIZE_VERTEX_NORMALS_BUF_SLOT
#define STRETCH_ANGLE_POS_NOR_BUF_SLOT
#define STRETCH_ANGLE_UVS_BUF_SLOT
#define CUSTOM_DATA_PATCH_COORDS_BUF_SLOT
void openSubdiv_deleteEvaluatorCache(OpenSubdiv_EvaluatorCache *evaluator_cache)
OpenSubdiv_EvaluatorCache * openSubdiv_createEvaluatorCache(eOpenSubdivEvaluator evaluator_type)
Extraction of Mesh data into VBO to feed to GPU.
static float verts[][3]
#define MEM_SAFE_FREE(v)
format
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
GroupedSpan< int > build_vert_to_edge_map(Span< int2 > edges, int verts_num, Array< int > &r_offsets, Array< int > &r_indices)
void free(Subdiv *subdiv)
Definition subdiv.cc:190
bool foreach_subdiv_geometry(Subdiv *subdiv, const ForeachContext *context, const ToMeshSettings *mesh_settings, const Mesh *coarse_mesh)
bool eval_begin_from_mesh(Subdiv *subdiv, const Mesh *mesh, Span< float3 > coarse_vert_positions, eSubdivEvaluatorType evaluator_type, OpenSubdiv_EvaluatorCache *evaluator_cache)
int * face_ptex_offset_get(Subdiv *subdiv)
Definition subdiv.cc:217
BLI_INLINE BMFace * bm_original_face_get(const MeshRenderData &mr, int idx)
/* File-local state. */
static blender::Mutex gpu_subdiv_queue_mutex
static LinkNode * gpu_subdiv_free_queue
static OpenSubdiv_EvaluatorCache * g_subdiv_evaluator_cache
static uint64_t g_subdiv_evaluator_users
/* Small helpers. */
static uint tris_count_from_number_of_loops(const uint number_of_loops)
static uint get_dispatch_size(uint elements)
static const GPUVertFormat & get_origindex_format()
const GPUVertFormat & draw_subdiv_get_pos_nor_format()
gpu::VertBufPtr draw_subdiv_init_origindex_buffer(int32_t *vert_origindex, uint num_loops, uint loose_len)
gpu::VertBuf * draw_subdiv_build_origindex_buffer(int *vert_origindex, uint num_loops)
/* Cache management. */
static DRWSubdivCache & mesh_batch_cache_ensure_subdiv_cache(MeshBatchCache &mbc)
static void draw_subdiv_cache_ensure_mat_offsets(DRWSubdivCache &cache, const Mesh *mesh_eval, uint mat_len)
static bool draw_subdiv_cache_need_face_data(const DRWSubdivCache &cache)
static void draw_subdiv_cache_free_material_data(DRWSubdivCache &cache)
static void draw_subdiv_free_edit_mode_cache(DRWSubdivCache &cache)
static void draw_patch_map_free(DRWPatchMap *gpu_patch_map)
void draw_subdiv_cache_free(DRWSubdivCache &cache)
void DRW_subdiv_cache_free(bke::subdiv::Subdiv *subdiv)
/* Coarse-face data. */
static uint32_t compute_coarse_face_flag_bm(BMFace *f, BMFace *efa_act)
static void draw_subdiv_cache_extra_coarse_face_data_mesh(const MeshRenderData &mr, const Mesh *mesh, MutableSpan< uint32_t > flags_data)
static void draw_subdiv_cache_extra_coarse_face_data_mapped(const Mesh *mesh, BMesh *bm, MeshRenderData &mr, MutableSpan< uint32_t > flags_data)
static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache &cache, const Mesh *mesh, MeshRenderData &mr)
/* UBO setup and compute dispatch. */
static void draw_subdiv_init_ubo_storage(const DRWSubdivCache &cache, DRWSubdivUboStorage *ubo, const int src_offset, const int dst_offset, const uint total_dispatch_size, const bool has_sculpt_mask, const uint edge_loose_offset)
static void draw_subdiv_ubo_update_and_bind(const DRWSubdivCache &cache, const int src_offset, const int dst_offset, const uint total_dispatch_size, const bool has_sculpt_mask=false, const uint edge_loose_offset=0)
static void drw_subdiv_compute_dispatch(const DRWSubdivCache &cache, GPUShader *shader, const int src_offset, const int dst_offset, uint total_dispatch_size, const bool has_sculpt_mask=false, const uint edge_loose_offset=0)
/* GPU buffer building. */
void draw_subdiv_extract_pos_nor(const DRWSubdivCache &cache, gpu::VertBuf *flags_buffer, gpu::VertBuf *pos_nor, gpu::VertBuf *orco)
void draw_subdiv_extract_uvs(const DRWSubdivCache &cache, gpu::VertBuf *uvs, const int face_varying_channel, const int dst_offset)
void draw_subdiv_interp_custom_data(const DRWSubdivCache &cache, gpu::VertBuf &src_data, gpu::VertBuf &dst_data, GPUVertCompType comp_type, int dimensions, int dst_offset)
void draw_subdiv_accumulate_normals(const DRWSubdivCache &cache, gpu::VertBuf *pos_nor, gpu::VertBuf *face_adjacency_offsets, gpu::VertBuf *face_adjacency_lists, gpu::VertBuf *vertex_loop_map, gpu::VertBuf *vert_normals)
void draw_subdiv_finalize_normals(const DRWSubdivCache &cache, gpu::VertBuf *vert_normals, gpu::VertBuf *subdiv_loop_subdiv_vert_index, gpu::VertBuf *pos_nor)
void draw_subdiv_finalize_custom_normals(const DRWSubdivCache &cache, gpu::VertBuf *src_custom_normals, gpu::VertBuf *pos_nor)
void draw_subdiv_build_lnor_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos_nor, gpu::VertBuf *lnor)
void draw_subdiv_build_tris_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *subdiv_tris, const int material_count)
void draw_subdiv_build_lines_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *lines_indices)
void draw_subdiv_build_lines_loose_buffer(const DRWSubdivCache &cache, gpu::IndexBuf *lines_indices, gpu::VertBuf *lines_flags, uint edge_loose_offset, uint num_loose_edges)
void draw_subdiv_build_edge_fac_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos_nor, gpu::VertBuf *edge_draw_flag, gpu::VertBuf *poly_other_map, gpu::VertBuf *edge_fac)
void draw_subdiv_build_fdots_buffers(const DRWSubdivCache &cache, gpu::VertBuf *fdots_pos, gpu::VertBuf *fdots_nor, gpu::IndexBuf *fdots_indices)
void draw_subdiv_build_edituv_stretch_area_buffer(const DRWSubdivCache &cache, gpu::VertBuf *coarse_data, gpu::VertBuf *subdiv_data)
void draw_subdiv_build_edituv_stretch_angle_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos_nor, gpu::VertBuf *uvs, int uvs_offset, gpu::VertBuf *stretch_angles)
void draw_subdiv_build_sculpt_data_buffer(const DRWSubdivCache &cache, gpu::VertBuf *mask_vbo, gpu::VertBuf *face_set_vbo, gpu::VertBuf *sculpt_data)
/* Entry points. */
MeshRenderData mesh_render_data_create(Object &object, Mesh &mesh, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool use_hide, const ToolSettings *ts)
void DRW_subdivide_loose_geom(DRWSubdivCache &subdiv_cache, const MeshBufferCache &cache)
static bool draw_subdiv_create_requested_buffers(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache &cache, MeshBufferCache &mbc, Span< IBOType > ibo_requests, Span< VBOType > vbo_requests, DRWSubdivCache &subdiv_cache, MeshRenderData &mr)
void DRW_create_subdivision(Object &ob, Mesh &mesh, MeshBatchCache &batch_cache, MeshBufferCache &mbc, const Span< IBOType > ibo_requests, const Span< VBOType > vbo_requests, const bool is_editmode, const bool is_paint_mode, const bool do_final, const bool do_uvedit, const bool do_cage, const ToolSettings *ts, const bool use_hide)
static void draw_subdiv_cache_extra_coarse_face_data_bm(BMesh *bm, BMFace *efa_act, MutableSpan< uint32_t > flags_data)
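A note on the dispatch helpers grouped above: get_dispatch_size and drw_subdiv_compute_dispatch have to cover total_dispatch_size elements with workgroups of a fixed local size (SUBDIV_LOCAL_WORK_GROUP_SIZE). The usual way to do that is a ceiling division, sketched below with a hypothetical local size, since the macro's value is not reproduced on this page.

/* Standalone sketch of a workgroup-count computation; the real local size is
 * SUBDIV_LOCAL_WORK_GROUP_SIZE, whose value is not shown here. */
#include <cassert>
#include <cstdio>

constexpr unsigned LOCAL_WORK_GROUP_SIZE = 64; /* hypothetical stand-in */

/* Smallest number of workgroups whose combined invocations cover `elements`. */
static unsigned dispatch_size(unsigned elements)
{
  return (elements + LOCAL_WORK_GROUP_SIZE - 1) / LOCAL_WORK_GROUP_SIZE;
}

int main()
{
  assert(dispatch_size(1) == 1);
  assert(dispatch_size(64) == 1);
  assert(dispatch_size(65) == 2);
  std::printf("groups for 1000 elements: %u\n", dispatch_size(1000));
  return 0;
}

The real helper may additionally split the group count across two dispatch dimensions when it would exceed the per-dimension limit; the sketch keeps to the one-dimensional case.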
std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > VertBufPtr
void build_reverse_offsets(Span< int > indices, MutableSpan< int > offsets)
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
std::mutex Mutex
Definition BLI_mutex.hh:47
OPENSUBDIV_EVALUATOR_GPU (eOpenSubdivEvaluator enumerator)
OPENSUBDIV_EVALUATOR_CPU (eOpenSubdivEvaluator enumerator)
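The names g_subdiv_evaluator_cache and g_subdiv_evaluator_users above suggest a lazily created, reference-counted global evaluator cache, obtained via openSubdiv_createEvaluatorCache and released with openSubdiv_deleteEvaluatorCache once the last user is gone. A minimal standalone sketch of that pattern follows; EvaluatorCache and the create/delete helpers are hypothetical stand-ins, not the OpenSubdiv API.

/* Sketch of a refcounted global cache; all types and helpers here are
 * hypothetical stand-ins for the OpenSubdiv equivalents. */
#include <cstdint>
#include <mutex>

struct EvaluatorCache { /* opaque */ };

static EvaluatorCache *create_cache() { return new EvaluatorCache(); }
static void delete_cache(EvaluatorCache *cache) { delete cache; }

static std::mutex g_mutex;
static EvaluatorCache *g_cache = nullptr;
static uint64_t g_users = 0;

/* Acquire the shared cache, creating it on first use. */
static EvaluatorCache *cache_acquire()
{
  std::lock_guard<std::mutex> lock(g_mutex);
  if (g_users++ == 0) {
    g_cache = create_cache();
  }
  return g_cache;
}

/* Release one user; the cache is freed when the last user lets go. */
static void cache_release()
{
  std::lock_guard<std::mutex> lock(g_mutex);
  if (--g_users == 0) {
    delete_cache(g_cache);
    g_cache = nullptr;
  }
}

int main()
{
  EvaluatorCache *cache = cache_acquire();
  (void)cache;
  cache_release();
  return 0;
}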
BMLoop * l_first
int totface
MeshRuntimeHandle * runtime
CustomData vert_data
int faces_num
int verts_num
blender::opensubdiv::EvalOutputAPI * eval_output
eOpenSubdivEvaluator type
blender::bke::subdiv::Settings settings
blender::bke::subdiv::Subdiv * subdiv_gpu
OpenSubdiv_Evaluator * evaluator
blender::opensubdiv::TopologyRefinerImpl * topology_refiner
gpu::VertBuf * subdiv_vertex_face_adjacency_offsets
VArraySpan< bool > sharp_faces
VArraySpan< bool > select_poly
bke::MeshNormalDomain normals_domain