Blender V4.5
extract_mesh_vbo_lnor.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2021 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup draw
 */

#include "BLI_array_utils.hh"

#include "GPU_attribute_convert.hh"

#include "extract_mesh.hh"

#include "draw_subdivision.hh"
17namespace blender::draw {
18
19template<typename GPUType>
20static void extract_vert_normals(const Span<int> corner_verts,
21 const Span<float3> vert_normals,
23{
24 Array<GPUType> vert_normals_converted(vert_normals.size());
25 gpu::convert_normals(vert_normals, vert_normals_converted.as_mutable_span());
26 array_utils::gather(vert_normals_converted.as_span(), corner_verts, normals);
27}
28
29template<typename GPUType>
31{
32 const OffsetIndices faces = mr.faces;
33 const Span<float3> face_normals = mr.face_normals;
34 threading::parallel_for(faces.index_range(), 4096, [&](const IndexRange range) {
35 for (const int face : range) {
36 normals.slice(faces[face]).fill(gpu::convert_normal<GPUType>(face_normals[face]));
37 }
38 });
39}
40
41template<typename GPUType>
43{
44 const auto get_vert_normals = [&]() {
45 return mr.use_simplify_normals ? mr.mesh->vert_normals_true() : mr.mesh->vert_normals();
46 };
49 }
51 extract_vert_normals(mr.corner_verts, get_vert_normals(), normals);
52 }
53 else if (!mr.corner_normals.is_empty()) {
55 }
56 else if (mr.sharp_faces.is_empty()) {
57 extract_vert_normals(mr.corner_verts, get_vert_normals(), normals);
58 }
59 else {
60 const OffsetIndices faces = mr.faces;
61 const Span<int> corner_verts = mr.corner_verts;
62 const Span<bool> sharp_faces = mr.sharp_faces;
63 const Span<float3> vert_normals = get_vert_normals();
64 const Span<float3> face_normals = mr.face_normals;
65 threading::parallel_for(faces.index_range(), 2048, [&](const IndexRange range) {
66 for (const int face : range) {
67 if (sharp_faces[face]) {
68 normals.slice(faces[face]).fill(gpu::convert_normal<GPUType>(face_normals[face]));
69 }
70 else {
71 for (const int corner : faces[face]) {
72 normals[corner] = gpu::convert_normal<GPUType>(vert_normals[corner_verts[corner]]);
73 }
74 }
75 }
76 });
77 }
78}
79
80template<typename GPUType>
82{
83 const bool use_face_select = (mr.mesh->editflag & ME_EDIT_PAINT_FACE_SEL) != 0;
84 Span<bool> selection;
86 selection = mr.select_poly;
87 }
88 else if (mr.mesh->editflag & ME_EDIT_PAINT_VERT_SEL) {
89 selection = mr.select_vert;
90 }
91 if (selection.is_empty() && mr.hide_poly.is_empty() && (!mr.edit_bmesh || !mr.orig_index_vert)) {
92 return;
93 }
94 const OffsetIndices faces = mr.faces;
95 threading::parallel_for(faces.index_range(), 1024, [&](const IndexRange range) {
96 if (!selection.is_empty()) {
97 if (use_face_select) {
98 for (const int face : range) {
99 if (selection[face]) {
100 for (const int corner : faces[face]) {
101 normals[corner].w = 1;
102 }
103 }
104 }
105 }
106 else {
107 const Span<int> corner_verts = mr.corner_verts;
108 for (const int face : range) {
109 for (const int corner : faces[face]) {
110 if (selection[corner_verts[corner]]) {
111 normals[corner].w = 1;
112 }
113 }
114 }
115 }
116 }
117 if (!mr.hide_poly.is_empty()) {
118 const Span<bool> hide_poly = mr.hide_poly;
119 for (const int face : range) {
120 if (hide_poly[face]) {
121 for (const int corner : faces[face]) {
122 normals[corner].w = -1;
123 }
124 }
125 }
126 }
127 if (mr.edit_bmesh && mr.orig_index_vert) {
128 const Span<int> corner_verts = mr.corner_verts;
129 const Span<int> orig_indices(mr.orig_index_vert, mr.verts_num);
130 for (const int face : range) {
131 for (const int corner : faces[face]) {
132 if (orig_indices[corner_verts[corner]] == ORIGINDEX_NONE) {
133 normals[corner].w = -1;
134 }
135 }
136 }
137 }
138 });
139}
140
141template<typename GPUType>
143{
144 const BMesh &bm = *mr.bm;
145 if (mr.bm_free_normal_offset_vert != -1) {
146 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
147 for (const int face_index : range) {
148 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
149 const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
150 const IndexRange face_range(BM_elem_index_get(loop), face.len);
151 for (const int corner : face_range) {
152 normals[corner] = gpu::convert_normal<GPUType>(
153 BM_ELEM_CD_GET_FLOAT_P(loop->v, mr.bm_free_normal_offset_vert));
154 loop = loop->next;
155 }
156 }
157 });
158 }
159 else if (!mr.bm_vert_normals.is_empty()) {
160 Array<GPUType> vert_normals_converted(mr.bm_vert_normals.size());
161 gpu::convert_normals(mr.bm_vert_normals, vert_normals_converted.as_mutable_span());
162 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
163 for (const int face_index : range) {
164 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
165 const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
166 const IndexRange face_range(BM_elem_index_get(loop), face.len);
167 for (const int corner : face_range) {
168 normals[corner] = vert_normals_converted[BM_elem_index_get(loop->v)];
169 loop = loop->next;
170 }
171 }
172 });
173 }
174 else {
175 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
176 for (const int face_index : range) {
177 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
178 const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
179 const IndexRange face_range(BM_elem_index_get(loop), face.len);
180 for (const int corner : face_range) {
181 normals[corner] = gpu::convert_normal<GPUType>(loop->v->no);
182 loop = loop->next;
183 }
184 }
185 });
186 }
187}
188
189template<typename GPUType>
191{
192 const BMesh &bm = *mr.bm;
193 if (mr.bm_free_normal_offset_face != -1) {
194 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
195 for (const int face_index : range) {
196 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
197 const IndexRange face_range(BM_elem_index_get(BM_FACE_FIRST_LOOP(&face)), face.len);
198 normals.slice(face_range)
199 .fill(gpu::convert_normal<GPUType>(
200 BM_ELEM_CD_GET_FLOAT_P(&face, mr.bm_free_normal_offset_face)));
201 }
202 });
203 }
204 else if (!mr.bm_face_normals.is_empty()) {
205 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
206 for (const int face_index : range) {
207 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
208 const IndexRange face_range(BM_elem_index_get(BM_FACE_FIRST_LOOP(&face)), face.len);
209 normals.slice(face_range)
210 .fill(gpu::convert_normal<GPUType>(mr.bm_face_normals[face_index]));
211 }
212 });
213 }
214 else {
215 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
216 for (const int face_index : range) {
217 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
218 const IndexRange face_range(BM_elem_index_get(BM_FACE_FIRST_LOOP(&face)), face.len);
219 normals.slice(face_range).fill(gpu::convert_normal<GPUType>(face.no));
220 }
221 });
222 }
223}
224
225template<typename GPUType>
227{
228 /* TODO: Return early if there are no hidden faces. */
229 const BMesh &bm = *mr.bm;
230 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
231 for (const int face_index : range) {
232 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
233 if (BM_elem_flag_test(&face, BM_ELEM_HIDDEN)) {
234 const IndexRange face_range(BM_elem_index_get(BM_FACE_FIRST_LOOP(&face)), face.len);
235 for (GPUType &value : normals.slice(face_range)) {
236 value.w = -1;
237 }
238 }
239 }
240 });
241}
242
243template<typename GPUType>
245{
246 const BMesh &bm = *mr.bm;
249 }
252 }
253 else if (mr.bm_free_normal_offset_corner != -1) {
254 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
255 for (const int face_index : range) {
256 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
257 const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
258 const IndexRange face_range(BM_elem_index_get(loop), face.len);
259 for (const int corner : face_range) {
260 normals[corner] = gpu::convert_normal<GPUType>(
261 BM_ELEM_CD_GET_FLOAT_P(loop, mr.bm_free_normal_offset_corner));
262 loop = loop->next;
263 }
264 }
265 });
266 }
267 else if (!mr.bm_loop_normals.is_empty()) {
268 gpu::convert_normals(mr.bm_loop_normals, normals);
269 }
270 else {
271 threading::parallel_for(IndexRange(bm.totface), 2048, [&](const IndexRange range) {
272 for (const int face_index : range) {
273 const BMFace &face = *BM_face_at_index(&const_cast<BMesh &>(bm), face_index);
274 const BMLoop *loop = BM_FACE_FIRST_LOOP(&face);
275 const IndexRange face_range(BM_elem_index_get(loop), face.len);
276
277 if (!BM_elem_flag_test(&face, BM_ELEM_SMOOTH)) {
278 if (!mr.bm_face_normals.is_empty()) {
279 normals.slice(face_range)
280 .fill(gpu::convert_normal<GPUType>(mr.bm_face_normals[face_index]));
281 }
282 else {
283 normals.slice(face_range).fill(gpu::convert_normal<GPUType>(face.no));
284 }
285 }
286 else {
287 if (!mr.bm_vert_normals.is_empty()) {
288 for (const int corner : face_range) {
289 normals[corner] = gpu::convert_normal<GPUType>(
290 mr.bm_vert_normals[BM_elem_index_get(loop->v)]);
291 loop = loop->next;
292 }
293 }
294 else {
295 for (const int corner : face_range) {
296 normals[corner] = gpu::convert_normal<GPUType>(loop->v->no);
297 loop = loop->next;
298 }
299 }
300 }
301 }
302 });
303 }
304}
305
306gpu::VertBufPtr extract_normals(const MeshRenderData &mr, const bool use_hq)
307{
308 const int size = mr.corners_num + mr.loose_indices_num;
309 if (use_hq) {
310 static const GPUVertFormat format = []() {
314 return format;
315 }();
318 MutableSpan vbo_data = vbo->data<short4>();
319 MutableSpan corners_data = vbo_data.take_front(mr.corners_num);
320 MutableSpan loose_data = vbo_data.take_back(mr.loose_indices_num);
321
323 extract_normals_mesh(mr, corners_data);
324 extract_paint_overlay_flags(mr, corners_data);
325 }
326 else {
327 extract_normals_bm(mr, corners_data);
328 extract_edit_flags_bm(mr, corners_data);
329 }
330
331 loose_data.fill(short4(0));
332 return vbo;
333 }
334 static const GPUVertFormat format = []() {
338 return format;
339 }();
342 MutableSpan vbo_data = vbo->data<gpu::PackedNormal>();
343 MutableSpan corners_data = vbo_data.take_front(mr.corners_num);
344 MutableSpan loose_data = vbo_data.take_back(mr.loose_indices_num);
345
347 extract_normals_mesh(mr, corners_data);
348 extract_paint_overlay_flags(mr, corners_data);
349 }
350 else {
351 extract_normals_bm(mr, corners_data);
352 extract_edit_flags_bm(mr, corners_data);
353 }
354
355 loose_data.fill(gpu::PackedNormal{});
356 return vbo;
357}
358
360{
361 static const GPUVertFormat format = []() {
366 return format;
367 }();
368 return format;
369}
370
372 const DRWSubdivCache &subdiv_cache,
373 gpu::VertBuf &pos_nor)
374{
375 const int vbo_size = subdiv_full_vbo_size(mr, subdiv_cache);
376 const int loose_geom_start = subdiv_cache.num_subdiv_loops;
377
380 draw_subdiv_build_lnor_buffer(subdiv_cache, &pos_nor, lnor.get());
381
382 /* Push VBO content to the GPU and bind the VBO so that #GPU_vertbuf_update_sub can work. */
383 GPU_vertbuf_use(lnor.get());
384
385 /* Default to zeroed attribute. The overlay shader should expect this and render engines should
386 * never draw loose geometry. */
387 const float4 default_normal(0.0f, 0.0f, 0.0f, 0.0f);
388 for (const int i : IndexRange::from_begin_end(loose_geom_start, vbo_size)) {
389 /* TODO(fclem): This has HORRENDOUS performance. Prefer clearing the buffer on device with
390 * something like glClearBufferSubData. */
391 GPU_vertbuf_update_sub(lnor.get(), i * sizeof(float4), sizeof(float4), &default_normal);
392 }
393 return lnor;
394}
395
396} // namespace blender::draw
@ ME_EDIT_PAINT_VERT_SEL
@ ME_EDIT_PAINT_FACE_SEL
blender::gpu::VertBuf * GPU_vertbuf_create_on_device(const GPUVertFormat &format, uint v_len)
#define GPU_vertbuf_create_with_format(format)
void GPU_vertbuf_use(blender::gpu::VertBuf *)
void GPU_vertbuf_data_alloc(blender::gpu::VertBuf &verts, uint v_len)
void GPU_vertbuf_update_sub(blender::gpu::VertBuf *verts, uint start, uint len, const void *data)
@ GPU_FETCH_FLOAT
@ GPU_FETCH_INT_TO_FLOAT_UNIT
void GPU_vertformat_alias_add(GPUVertFormat *, blender::StringRef alias)
uint GPU_vertformat_attr_add(GPUVertFormat *, blender::StringRef name, GPUVertCompType, uint comp_len, GPUVertFetchMode)
@ GPU_COMP_I10
@ GPU_COMP_F32
@ GPU_COMP_I16
BMesh * bm
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
constexpr bool is_empty() const
Definition BLI_span.hh:260
Span< T > as_span() const
Definition BLI_array.hh:232
MutableSpan< T > as_mutable_span()
Definition BLI_array.hh:237
static constexpr IndexRange from_begin_end(const int64_t begin, const int64_t end)
constexpr MutableSpan take_back(const int64_t n) const
Definition BLI_span.hh:640
constexpr T * data() const
Definition BLI_span.hh:539
constexpr void fill(const T &value) const
Definition BLI_span.hh:517
constexpr MutableSpan take_front(const int64_t n) const
Definition BLI_span.hh:629
constexpr int64_t size() const
Definition BLI_span.hh:252
constexpr bool is_empty() const
Definition BLI_span.hh:260
Extraction of Mesh data into VBO to feed to GPU.
static float normals[][3]
VecBase< short, 4 > short4
format
static char faces[256]
void gather(const GVArray &src, const IndexMask &indices, GMutableSpan dst, int64_t grain_size=4096)
static void extract_face_normals_bm(const MeshRenderData &mr, MutableSpan< GPUType > normals)
gpu::VertBufPtr extract_vert_normals(const MeshRenderData &mr)
gpu::VertBufPtr extract_normals_subdiv(const MeshRenderData &mr, const DRWSubdivCache &subdiv_cache, gpu::VertBuf &pos_nor)
static const GPUVertFormat & get_subdiv_lnor_format()
static void extract_vert_normals_bm(const MeshRenderData &mr, MutableSpan< GPUType > normals)
void draw_subdiv_build_lnor_buffer(const DRWSubdivCache &cache, gpu::VertBuf *pos_nor, gpu::VertBuf *lnor)
gpu::VertBufPtr extract_normals(const MeshRenderData &mr, bool use_hq)
int subdiv_full_vbo_size(const MeshRenderData &mr, const DRWSubdivCache &cache)
static void extract_paint_overlay_flags(const MeshRenderData &mr, MutableSpan< GPUType > normals)
static void extract_normals_mesh(const MeshRenderData &mr, MutableSpan< GPUType > normals)
static void extract_normals_bm(const MeshRenderData &mr, MutableSpan< GPUType > normals)
static void extract_face_normals(const MeshRenderData &mr, MutableSpan< GPUType > normals)
static void extract_edit_flags_bm(const MeshRenderData &mr, MutableSpan< GPUType > normals)
void convert_normals(Span< float3 > src, MutableSpan< GPUType > dst)
std::unique_ptr< gpu::VertBuf, gpu::VertBufDeleter > VertBufPtr
void parallel_for(const IndexRange range, const int64_t grain_size, const Function &function, const TaskSizeHints &size_hints=detail::TaskSizeHints_Static(1))
Definition BLI_task.hh:93
blender::VecBase< int16_t, 4 > short4
VecBase< float, 4 > float4
char editflag
VArraySpan< bool > select_vert
VArraySpan< bool > sharp_faces
VArraySpan< bool > select_poly
OffsetIndices< int > faces
bke::MeshNormalDomain normals_domain
i
Definition text_draw.cc:230