Blender V4.5
bmesh_interp.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2007 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
10
11#include "MEM_guardedalloc.h"
12
13#include "DNA_meshdata_types.h"
14
15#include "BLI_alloca.h"
16#include "BLI_linklist.h"
17#include "BLI_math_geom.h"
18#include "BLI_math_matrix.h"
19#include "BLI_math_vector.h"
20#include "BLI_memarena.h"
21#include "BLI_task.h"
22
23#include "BKE_attribute.h"
24#include "BKE_customdata.hh"
25#include "BKE_multires.hh"
26
27#include "bmesh.hh"
29
31
32/* edge and vertex share, currently there's no need to have different logic */
33static void bm_data_interp_from_elem(CustomData *data_layer,
34 const BMElem *ele_src_1,
35 const BMElem *ele_src_2,
36 BMElem *ele_dst,
37 const float fac)
38{
39 if (ele_src_1->head.data && ele_src_2->head.data) {
40 /* first see if we can avoid interpolation */
41 if (fac <= 0.0f) {
42 if (ele_src_1 == ele_dst) {
43 /* do nothing */
44 }
45 else {
46 CustomData_bmesh_copy_block(*data_layer, ele_src_1->head.data, &ele_dst->head.data);
47 }
48 }
49 else if (fac >= 1.0f) {
50 if (ele_src_2 == ele_dst) {
51 /* do nothing */
52 }
53 else {
54 CustomData_bmesh_copy_block(*data_layer, ele_src_2->head.data, &ele_dst->head.data);
55 }
56 }
57 else {
58 const void *src[2];
59 float w[2];
60
61 src[0] = ele_src_1->head.data;
62 src[1] = ele_src_2->head.data;
63 w[0] = 1.0f - fac;
64 w[1] = fac;
65 CustomData_bmesh_interp(data_layer, src, w, nullptr, 2, ele_dst->head.data);
66 }
67 }
68}
69
71 BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
72{
74 &bm->vdata, (const BMElem *)v_src_1, (const BMElem *)v_src_2, (BMElem *)v_dst, fac);
75}
76
78 BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
79{
81 &bm->edata, (const BMElem *)e_src_1, (const BMElem *)e_src_2, (BMElem *)e_dst, fac);
82}
83
/* Stub: appears to be an unimplemented placeholder for averaging vertex
 * custom-data over a face (only a commented-out iterator remains).
 * Kept unused; parameters are intentionally ignored. */
static void UNUSED_FUNCTION(BM_Data_Vert_Average)(BMesh * /*bm*/, BMFace * /*f*/)
{
  // BMIter iter;
}
94
96 const BMVert *v_src_1,
97 const BMVert * /*v_src_2*/,
98 BMVert *v,
99 BMEdge *e,
100 const float fac)
101{
102 float w[2];
103 BMLoop *l_v1 = nullptr, *l_v = nullptr, *l_v2 = nullptr;
104 BMLoop *l_iter = nullptr;
105
106 if (!e->l) {
107 return;
108 }
109
110 w[1] = 1.0f - fac;
111 w[0] = fac;
112
113 l_iter = e->l;
114 do {
115 if (l_iter->v == v_src_1) {
116 l_v1 = l_iter;
117 l_v = l_v1->next;
118 l_v2 = l_v->next;
119 }
120 else if (l_iter->v == v) {
121 l_v1 = l_iter->next;
122 l_v = l_iter;
123 l_v2 = l_iter->prev;
124 }
125
126 if (!l_v1 || !l_v2) {
127 return;
128 }
129
130 const void *src[2];
131 src[0] = l_v1->head.data;
132 src[1] = l_v2->head.data;
133
134 CustomData_bmesh_interp(&bm->ldata, src, w, nullptr, 2, l_v->head.data);
135 } while ((l_iter = l_iter->radial_next) != e->l);
136}
137
139 BMFace *f_dst,
140 const BMFace *f_src,
141 const bool do_vertex,
142 const void **blocks_l,
143 const void **blocks_v,
144 float (*cos_2d)[2],
145 float axis_mat[3][3])
146{
147 BMLoop *l_iter;
148 BMLoop *l_first;
149
150 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
151 float co[2];
152
153 /* interpolate */
154 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
155 do {
156 mul_v2_m3v3(co, axis_mat, l_iter->v->co);
157 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
158 CustomData_bmesh_interp(&bm->ldata, blocks_l, w, nullptr, f_src->len, l_iter->head.data);
159 if (do_vertex) {
160 CustomData_bmesh_interp(&bm->vdata, blocks_v, w, nullptr, f_src->len, l_iter->v->head.data);
161 }
162 } while ((l_iter = l_iter->next) != l_first);
163}
164
/**
 * Interpolate loop (and optionally vertex) custom-data of `f_dst` from `f_src`.
 *
 * Projects the source face's corners to 2D along its normal, gathers each
 * source loop's (and vertex's) custom-data block, then delegates the per-loop
 * weighted interpolation to #BM_face_interp_from_face_ex.
 *
 * \param do_vertex: When true, vertex custom-data is interpolated as well.
 */
void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
{
  BMLoop *l_iter;
  BMLoop *l_first;

  /* Stack-allocated per-corner arrays sized to the source face. */
  const void **blocks_l = static_cast<const void **>(BLI_array_alloca(blocks_l, f_src->len));
  const void **blocks_v = do_vertex ?
                              static_cast<const void **>(BLI_array_alloca(blocks_v, f_src->len)) :
                              nullptr;
  float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
  float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
  int i;

  /* convert the 3d coords into 2d for projection */
  /* NOTE(review): upstream asserts the source face normal is valid here —
   * that line appears missing in this copy; confirm against upstream. */
  axis_dominant_v3_to_m3(axis_mat, f_src->no);

  /* Collect projected corner positions and custom-data block pointers,
   * one entry per source loop. */
  i = 0;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
  do {
    mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
    blocks_l[i] = l_iter->head.data;
    if (do_vertex) {
      blocks_v[i] = l_iter->v->head.data;
    }
  } while ((void)i++, (l_iter = l_iter->next) != l_first);

  BM_face_interp_from_face_ex(bm, f_dst, f_src, do_vertex, blocks_l, blocks_v, cos_2d, axis_mat);
}
194
/**
 * Build the corner-quad of loop `l` used for multires displacement sampling:
 * v1 = face center, v2 = midpoint of the previous edge, v3 = the loop's
 * vertex, v4 = midpoint of the next edge. Also returns the two edge vectors
 * e1 = v2 - v1 and e2 = v3 - v4 used for bilinear evaluation.
 *
 * \param l_f_center: Pre-computed center of `l->f` (passed in to avoid
 * recomputing it per loop).
 * \return Always 1.
 */
static int compute_mdisp_quad(const BMLoop *l,
                              const float l_f_center[3],
                              float v1[3],
                              float v2[3],
                              float v3[3],
                              float v4[3],
                              float e1[3],
                              float e2[3])
{
  float n[3], p[3];

#ifndef NDEBUG
  {
    float cent[3];
    /* compute center */
    /* NOTE(review): the statement that fills `cent` appears to be missing in
     * this copy of the file — as written, `cent` is asserted uninitialized.
     * Confirm against upstream. */
    BLI_assert(equals_v3v3(cent, l_f_center));
  }
#endif

  /* Edge midpoints adjacent to the loop's vertex. */
  mid_v3_v3v3(p, l->prev->v->co, l->v->co);
  mid_v3_v3v3(n, l->next->v->co, l->v->co);

  copy_v3_v3(v1, l_f_center);
  copy_v3_v3(v2, p);
  copy_v3_v3(v3, l->v->co);
  copy_v3_v3(v4, n);

  sub_v3_v3v3(e1, v2, v1);
  sub_v3_v3v3(e2, v3, v4);

  return 1;
}
242
243static bool quad_co(const float v1[3],
244 const float v2[3],
245 const float v3[3],
246 const float v4[3],
247 const float p[3],
248 const float n[3],
249 float r_uv[2])
250{
251 float projverts[5][3], n2[3];
252 const float origin[2] = {0.0f, 0.0f};
253 int i;
254
255 /* project points into 2d along normal */
256 copy_v3_v3(projverts[0], v1);
257 copy_v3_v3(projverts[1], v2);
258 copy_v3_v3(projverts[2], v3);
259 copy_v3_v3(projverts[3], v4);
260 copy_v3_v3(projverts[4], p);
261
262 normal_quad_v3(n2, projverts[0], projverts[1], projverts[2], projverts[3]);
263
264 if (dot_v3v3(n, n2) < -FLT_EPSILON) {
265 return false;
266 }
267
268 /* rotate */
269 poly_rotate_plane(n, projverts, 5);
270
271 /* subtract origin */
272 for (i = 0; i < 4; i++) {
273 sub_v2_v2(projverts[i], projverts[4]);
274 }
275
276 if (!isect_point_quad_v2(origin, projverts[0], projverts[1], projverts[2], projverts[3])) {
277 return false;
278 }
279
280 resolve_quad_uv_v2(r_uv, origin, projverts[0], projverts[3], projverts[2], projverts[1]);
281
282 return true;
283}
284
285static void mdisp_axis_from_quad(const float v1[3],
286 const float v2[3],
287 float /*v3*/[3],
288 const float v4[3],
289 float r_axis_x[3],
290 float r_axis_y[3])
291{
292 sub_v3_v3v3(r_axis_x, v4, v1);
293 sub_v3_v3v3(r_axis_y, v2, v1);
294
295 normalize_v3(r_axis_x);
296 normalize_v3(r_axis_y);
297}
298
/**
 * Test whether point `p` lies inside the (slightly expanded) corner-quad of
 * `l_dst` built around `l_dst_f_center`. On success, `r_uv` receives the
 * grid-space coordinates of `p` scaled to a `res`-sized grid, and
 * `r_axis_x`/`r_axis_y` receive the quad's tangent axes.
 */
static bool mdisp_in_mdispquad(BMLoop *l_src,
                               BMLoop *l_dst,
                               const float l_dst_f_center[3],
                               const float p[3],
                               int res,
                               float r_axis_x[3],
                               float r_axis_y[3],
                               float r_uv[2])
{
  float v1[3], v2[3], c[3], v3[3], v4[3], e1[3], e2[3];
  float eps = FLT_EPSILON * 4000;

  /* NOTE(review): both branches below are empty in this copy of the file —
   * the statements handling zero-length vertex normals appear to be missing.
   * Confirm against upstream before relying on this behavior. */
  if (is_zero_v3(l_src->v->no)) {
  }
  if (is_zero_v3(l_dst->v->no)) {
  }

  compute_mdisp_quad(l_dst, l_dst_f_center, v1, v2, v3, v4, e1, e2);

  /* expand quad a bit */
  mid_v3_v3v3v3v3(c, v1, v2, v3, v4);

  /* Scale every corner away from the quad center by (1 + eps) so points
   * sitting exactly on the boundary still register as inside. */
  sub_v3_v3(v1, c);
  sub_v3_v3(v2, c);
  sub_v3_v3(v3, c);
  sub_v3_v3(v4, c);
  mul_v3_fl(v1, 1.0f + eps);
  mul_v3_fl(v2, 1.0f + eps);
  mul_v3_fl(v3, 1.0f + eps);
  mul_v3_fl(v4, 1.0f + eps);
  add_v3_v3(v1, c);
  add_v3_v3(v2, c);
  add_v3_v3(v3, c);
  add_v3_v3(v4, c);

  if (!quad_co(v1, v2, v3, v4, p, l_src->v->no, r_uv)) {
    return false;
  }

  /* Convert normalized quad UV to grid-cell coordinates. */
  mul_v2_fl(r_uv, float(res - 1));

  mdisp_axis_from_quad(v1, v2, v3, v4, r_axis_x, r_axis_y);

  return true;
}
352
353static float bm_loop_flip_equotion(float mat[2][2],
354 float b[2],
355 const float target_axis_x[3],
356 const float target_axis_y[3],
357 const float coord[3],
358 int i,
359 int j)
360{
361 mat[0][0] = target_axis_x[i];
362 mat[0][1] = target_axis_y[i];
363 mat[1][0] = target_axis_x[j];
364 mat[1][1] = target_axis_y[j];
365 b[0] = coord[i];
366 b[1] = coord[j];
367
368 return cross_v2v2(mat[0], mat[1]);
369}
370
/**
 * Re-express a multires displacement vector given in the source grid's
 * tangent axes in terms of the target grid's tangent axes.
 *
 * \param disp: On input, disp[0]/disp[1] are coordinates along the source
 * axes; on output they are coordinates along the target axes (disp[2] is
 * left untouched).
 */
static void bm_loop_flip_disp(const float source_axis_x[3],
                              const float source_axis_y[3],
                              const float target_axis_x[3],
                              const float target_axis_y[3],
                              float disp[3])
{
  float vx[3], vy[3], coord[3];
  float n[3], vec[3];
  float b[2], mat[2][2], d;

  /* Reconstruct the world-space vector from the source-axis coordinates. */
  mul_v3_v3fl(vx, source_axis_x, disp[0]);
  mul_v3_v3fl(vy, source_axis_y, disp[1]);
  add_v3_v3v3(coord, vx, vy);

  /* project displacement from source grid plane onto target grid plane */
  cross_v3_v3v3(n, target_axis_x, target_axis_y);
  project_v3_v3v3(vec, coord, n);
  sub_v3_v3v3(coord, coord, vec);

  /* Solve a 2x2 system for the target-axis coordinates, retrying with other
   * component pairs when the chosen pair is numerically degenerate. */
  d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 1);

  if (fabsf(d) < 1e-4f) {
    d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 0, 2);
    if (fabsf(d) < 1e-4f) {
      /* NOTE(review): if this last determinant is also tiny, the divisions
       * below amplify error — presumably the axes are never fully degenerate
       * here; confirm. */
      d = bm_loop_flip_equotion(mat, b, target_axis_x, target_axis_y, coord, 1, 2);
    }
  }

  /* Cramer's rule for the 2x2 system. */
  disp[0] = (b[0] * mat[1][1] - mat[0][1] * b[1]) / d;
  disp[1] = (mat[0][0] * b[1] - b[0] * mat[1][0]) / d;
}
402
407
409 const float *f_src_center;
410
411 float *axis_x, *axis_y;
412 float *v1, *v4;
413 float *e1, *e2;
414
415 int res;
416 float d;
417};
418
/* Task-parallel callback: fills the entries with x-index `ix` of the
 * destination loop's multires grid by sampling the source face's grids.
 * Each invocation handles one value of `ix` across all `iy` rows. */
static void loop_interp_multires_cb(void *__restrict userdata,
                                    const int ix,
                                    const TaskParallelTLS *__restrict /*tls*/)
{
  BMLoopInterpMultiresData *data = static_cast<BMLoopInterpMultiresData *>(userdata);

  /* Unpack shared, read-only state prepared by the caller. */
  BMLoop *l_first = data->l_src_first;
  BMLoop *l_dst = data->l_dst;
  const int cd_loop_mdisp_offset = data->cd_loop_mdisp_offset;

  MDisps *md_dst = data->md_dst;
  const float *f_src_center = data->f_src_center;

  float *axis_x = data->axis_x;
  float *axis_y = data->axis_y;

  /* Corner-quad geometry of the destination loop. */
  float *v1 = data->v1;
  float *v4 = data->v4;
  float *e1 = data->e1;
  float *e2 = data->e2;

  const int res = data->res;
  const float d = data->d;

  float x = d * ix, y;
  int iy;
  for (y = 0.0f, iy = 0; iy < res; y += d, iy++) {
    BMLoop *l_iter = l_first;
    float co1[3], co2[3], co[3];

    /* Bilinear position on the destination corner-quad for cell (ix, iy). */
    madd_v3_v3v3fl(co1, v1, e1, y);
    madd_v3_v3v3fl(co2, v4, e2, y);
    interp_v3_v3v3(co, co1, co2, x);

    /* Walk the source face's loops until one's corner-quad contains `co`;
     * sample its grid there, convert axes, and stop searching. */
    do {
      MDisps *md_src;
      float src_axis_x[3], src_axis_y[3];
      float uv[2];

      md_src = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_mdisp_offset));

      if (mdisp_in_mdispquad(l_dst, l_iter, f_src_center, co, res, src_axis_x, src_axis_y, uv)) {
        old_mdisps_bilinear(md_dst->disps[iy * res + ix], md_src->disps, res, uv[0], uv[1]);
        bm_loop_flip_disp(src_axis_x, src_axis_y, axis_x, axis_y, md_dst->disps[iy * res + ix]);

        break;
      }
    } while ((l_iter = l_iter->next) != l_first);
  }
}
469
471 BMLoop *l_dst,
472 const BMFace *f_src,
473 const float f_dst_center[3],
474 const float f_src_center[3],
475 const int cd_loop_mdisp_offset)
476{
477 MDisps *md_dst;
478 float v1[3], v2[3], v3[3], v4[3] = {0.0f, 0.0f, 0.0f}, e1[3], e2[3];
479 float axis_x[3], axis_y[3];
480
481 /* ignore 2-edged faces */
482 if (UNLIKELY(l_dst->f->len < 3)) {
483 return;
484 }
485
486 md_dst = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l_dst, cd_loop_mdisp_offset));
487 compute_mdisp_quad(l_dst, f_dst_center, v1, v2, v3, v4, e1, e2);
488
489 /* if no disps data allocate a new grid, the size of the first grid in f_src. */
490 if (!md_dst->totdisp) {
491 const MDisps *md_src = static_cast<const MDisps *>(
492 BM_ELEM_CD_GET_VOID_P(BM_FACE_FIRST_LOOP(f_src), cd_loop_mdisp_offset));
493
494 md_dst->totdisp = md_src->totdisp;
495 md_dst->level = md_src->level;
496 if (md_dst->totdisp) {
497 md_dst->disps = static_cast<float(*)[3]>(
498 MEM_callocN(sizeof(float[3]) * md_dst->totdisp, __func__));
499 }
500 else {
501 return;
502 }
503 }
504
505 mdisp_axis_from_quad(v1, v2, v3, v4, axis_x, axis_y);
506
507 const int res = int(sqrt(md_dst->totdisp));
509 data.l_dst = l_dst;
510 data.l_src_first = BM_FACE_FIRST_LOOP(f_src);
511 data.cd_loop_mdisp_offset = cd_loop_mdisp_offset;
512 data.md_dst = md_dst;
513 data.f_src_center = f_src_center;
514 data.axis_x = axis_x;
515 data.axis_y = axis_y;
516 data.v1 = v1;
517 data.v4 = v4;
518 data.e1 = e1;
519 data.e2 = e2;
520 data.res = res;
521 data.d = 1.0f / float(res - 1);
522
523 TaskParallelSettings settings;
525 settings.use_threading = (res > 5);
527}
528
529void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
530{
531 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
532
533 if (cd_loop_mdisp_offset != -1) {
534 float f_dst_center[3];
535 float f_src_center[3];
536
537 BM_face_calc_center_median(l_dst->f, f_dst_center);
538 BM_face_calc_center_median(f_src, f_src_center);
539
540 BM_loop_interp_multires_ex(bm, l_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
541 }
542}
543
545 BMFace *f_dst,
546 const BMFace *f_src,
547 const float f_dst_center[3],
548 const float f_src_center[3],
549 const int cd_loop_mdisp_offset)
550{
551 BMLoop *l_iter, *l_first;
552 l_iter = l_first = BM_FACE_FIRST_LOOP(f_dst);
553 do {
555 bm, l_iter, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
556 } while ((l_iter = l_iter->next) != l_first);
557}
558
559void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
560{
561 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
562
563 if (cd_loop_mdisp_offset != -1) {
564 float f_dst_center[3];
565 float f_src_center[3];
566
567 BM_face_calc_center_median(f_dst, f_dst_center);
568 BM_face_calc_center_median(f_src, f_src_center);
569
570 BM_face_interp_multires_ex(bm, f_dst, f_src, f_dst_center, f_src_center, cd_loop_mdisp_offset);
571 }
572}
573
575{
576 const int cd_loop_mdisp_offset = CustomData_get_offset(&bm->ldata, CD_MDISPS);
577 BMLoop *l;
578 BMIter liter;
579
580 if (cd_loop_mdisp_offset == -1) {
581 return;
582 }
583
584 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
585 MDisps *mdp = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->prev, cd_loop_mdisp_offset));
586 MDisps *mdl = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
587 MDisps *mdn = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->next, cd_loop_mdisp_offset));
588 float co1[3];
589 int sides;
590 int y;
591
606
607 sides = int(sqrt(mdp->totdisp));
608 for (y = 0; y < sides; y++) {
609 mid_v3_v3v3(co1, mdn->disps[y * sides], mdl->disps[y]);
610
611 copy_v3_v3(mdn->disps[y * sides], co1);
612 copy_v3_v3(mdl->disps[y], co1);
613 }
614 }
615
616 BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
617 MDisps *mdl1 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l, cd_loop_mdisp_offset));
618 MDisps *mdl2;
619 float co1[3], co2[3], co[3];
620 int sides;
621 int y;
622
637
638 if (l->radial_next == l) {
639 continue;
640 }
641
642 if (l->radial_next->v == l->v) {
643 mdl2 = static_cast<MDisps *>(BM_ELEM_CD_GET_VOID_P(l->radial_next, cd_loop_mdisp_offset));
644 }
645 else {
646 mdl2 = static_cast<MDisps *>(
647 BM_ELEM_CD_GET_VOID_P(l->radial_next->next, cd_loop_mdisp_offset));
648 }
649
650 sides = int(sqrt(mdl1->totdisp));
651 for (y = 0; y < sides; y++) {
652 int a1, a2, o1, o2;
653
654 if (l->v != l->radial_next->v) {
655 a1 = sides * y + sides - 2;
656 a2 = (sides - 2) * sides + y;
657
658 o1 = sides * y + sides - 1;
659 o2 = (sides - 1) * sides + y;
660 }
661 else {
662 a1 = sides * y + sides - 2;
663 a2 = sides * y + sides - 2;
664 o1 = sides * y + sides - 1;
665 o2 = sides * y + sides - 1;
666 }
667
668 /* magic blending numbers, hardcoded! */
669 add_v3_v3v3(co1, mdl1->disps[a1], mdl2->disps[a2]);
670 mul_v3_fl(co1, 0.18);
671
672 add_v3_v3v3(co2, mdl1->disps[o1], mdl2->disps[o2]);
673 mul_v3_fl(co2, 0.32);
674
675 add_v3_v3v3(co, co1, co2);
676
677 copy_v3_v3(mdl1->disps[o1], co);
678 copy_v3_v3(mdl2->disps[o2], co);
679 }
680 }
681}
682
684 BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
685{
686 BMLoop *l_iter;
687 BMLoop *l_first;
688 const void **vblocks = do_vertex ?
689 static_cast<const void **>(BLI_array_alloca(vblocks, f_src->len)) :
690 nullptr;
691 const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
692 float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
693 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
694 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
695 float co[2];
696
697 /* Convert the 3d coords into 2d for projection. */
698 float axis_dominant[3];
699 if (!is_zero_v3(f_src->no)) {
700 BLI_assert(BM_face_is_normal_valid(f_src));
701 copy_v3_v3(axis_dominant, f_src->no);
702 }
703 else {
704 /* Rare case in which all the vertices of the face are aligned.
705 * Get a random axis that is orthogonal to the tangent. */
706 float vec[3];
707 BM_face_calc_tangent_auto(f_src, vec);
708 ortho_v3_v3(axis_dominant, vec);
709 normalize_v3(axis_dominant);
710 }
711 axis_dominant_v3_to_m3(axis_mat, axis_dominant);
712
713 int i = 0;
714 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
715 do {
716 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
717 blocks[i] = l_iter->head.data;
718
719 if (do_vertex) {
720 vblocks[i] = l_iter->v->head.data;
721 }
722 } while ((void)i++, (l_iter = l_iter->next) != l_first);
723
724 mul_v2_m3v3(co, axis_mat, l_dst->v->co);
725
726 /* interpolate */
727 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
728 CustomData_bmesh_interp(&bm->ldata, blocks, w, nullptr, f_src->len, l_dst->head.data);
729 if (do_vertex) {
730 CustomData_bmesh_interp(&bm->vdata, vblocks, w, nullptr, f_src->len, l_dst->v->head.data);
731 }
732
733 if (do_multires) {
734 BM_loop_interp_multires(bm, l_dst, f_src);
735 }
736}
737
738void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
739{
740 BMLoop *l_iter;
741 BMLoop *l_first;
742 const void **blocks = static_cast<const void **>(BLI_array_alloca(blocks, f_src->len));
743 float(*cos_2d)[2] = static_cast<float(*)[2]>(BLI_array_alloca(cos_2d, f_src->len));
744 float *w = static_cast<float *>(BLI_array_alloca(w, f_src->len));
745 float axis_mat[3][3]; /* use normal to transform into 2d xy coords */
746 float co[2];
747
748 /* convert the 3d coords into 2d for projection */
750 axis_dominant_v3_to_m3(axis_mat, f_src->no);
751
752 int i = 0;
753 l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
754 do {
755 mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
756 blocks[i] = l_iter->v->head.data;
757 } while ((void)i++, (l_iter = l_iter->next) != l_first);
758
759 mul_v2_m3v3(co, axis_mat, v_dst->co);
760
761 /* interpolate */
762 interp_weights_poly_v2(w, cos_2d, f_src->len, co);
763 CustomData_bmesh_interp(&bm->vdata, blocks, w, nullptr, f_src->len, v_dst->head.data);
764}
765
767{
769
770 BMIter iter;
771 BLI_mempool *oldpool = olddata->pool;
772 void *block;
773
774 if (data == &bm->vdata) {
775 BMVert *eve;
776
778
779 BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
780 block = nullptr;
781 CustomData_bmesh_copy_block(*data, cd_map, eve->head.data, &block);
782 CustomData_bmesh_free_block(olddata, &eve->head.data);
783 eve->head.data = block;
784 }
785 }
786 else if (data == &bm->edata) {
787 BMEdge *eed;
788
790
791 BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
792 block = nullptr;
793 CustomData_bmesh_copy_block(*data, cd_map, eed->head.data, &block);
794 CustomData_bmesh_free_block(olddata, &eed->head.data);
795 eed->head.data = block;
796 }
797 }
798 else if (data == &bm->ldata) {
799 BMIter liter;
800 BMFace *efa;
801 BMLoop *l;
802
804 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
805 BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
806 block = nullptr;
807 CustomData_bmesh_copy_block(*data, cd_map, l->head.data, &block);
808 CustomData_bmesh_free_block(olddata, &l->head.data);
809 l->head.data = block;
810 }
811 }
812 }
813 else if (data == &bm->pdata) {
814 BMFace *efa;
815
817
818 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
819 block = nullptr;
820 CustomData_bmesh_copy_block(*data, cd_map, efa->head.data, &block);
821 CustomData_bmesh_free_block(olddata, &efa->head.data);
822 efa->head.data = block;
823 }
824 }
825 else {
826 /* should never reach this! */
827 BLI_assert(0);
828 }
829
830 if (oldpool) {
831 /* this should never happen but can when dissolve fails - #28960. */
832 BLI_assert(data->pool != oldpool);
833
834 BLI_mempool_destroy(oldpool);
835 }
836}
837
839{
840 CustomData olddata = *data;
841 olddata.layers = (olddata.layers) ?
842 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
843 nullptr;
844 /* The pool is now owned by `olddata` and must not be shared. */
845 data->pool = nullptr;
846
848
849 update_data_blocks(bm, &olddata, data);
850 if (olddata.layers) {
851 MEM_freeN(olddata.layers);
852 }
853}
854
856{
857 CustomData olddata = *data;
858 olddata.layers = (olddata.layers) ?
859 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
860 nullptr;
861 /* The pool is now owned by `olddata` and must not be shared. */
862 data->pool = nullptr;
863
865
866 update_data_blocks(bm, &olddata, data);
867 if (olddata.layers) {
868 MEM_freeN(olddata.layers);
869 }
870}
871
873{
874 if (CustomData_get_named_layer_index(data, eCustomDataType(type), name) == -1) {
875 BM_data_layer_add_named(bm, data, type, name);
876 }
877}
878
880{
881 const int nr_uv_layers = CustomData_number_of_layers(&bm->ldata, CD_PROP_FLOAT2);
882 for (int l = 0; l < nr_uv_layers; l++) {
883 /* NOTE: you can't re-use the return-value of #CustomData_get_layer_name()
884 * because adding layers can invalidate that. */
885 char name[MAX_CUSTOMDATA_LAYER_NAME];
887 bm,
888 &bm->ldata,
891 name));
893 bm,
894 &bm->ldata,
897 name));
899 bm,
900 &bm->ldata,
903 }
904}
905
907{
908 char name[MAX_CUSTOMDATA_LAYER_NAME];
910 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_vert_select_name_get(uv_map_name, name));
911}
912
914{
915 char name[MAX_CUSTOMDATA_LAYER_NAME];
917 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_edge_select_name_get(uv_map_name, name));
918}
919
921{
922 char name[MAX_CUSTOMDATA_LAYER_NAME];
924 bm, &bm->ldata, CD_PROP_BOOL, BKE_uv_map_pin_name_get(uv_map_name, name));
925}
926
928{
929 char name[MAX_CUSTOMDATA_LAYER_NAME];
931 &bm->ldata, CD_PROP_BOOL, BKE_uv_map_vert_select_name_get(uv_map_name, name)) != -1);
932}
933
934bool BM_uv_map_attr_pin_exists(const BMesh *bm, const StringRef uv_map_name)
935{
936 char name[MAX_CUSTOMDATA_LAYER_NAME];
938 &bm->ldata, CD_PROP_BOOL, BKE_uv_map_pin_name_get(uv_map_name, name)) != -1);
939}
940
942{
943 CustomData olddata = *data;
944 olddata.layers = (olddata.layers) ?
945 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
946 nullptr;
947 /* The pool is now owned by `olddata` and must not be shared. */
948 data->pool = nullptr;
949
950 const bool had_layer = CustomData_free_layer_active(data, eCustomDataType(type));
951 /* Assert because its expensive to realloc - better not do if layer isn't present. */
952 BLI_assert(had_layer != false);
953 UNUSED_VARS_NDEBUG(had_layer);
954
955 update_data_blocks(bm, &olddata, data);
956 if (olddata.layers) {
957 MEM_freeN(olddata.layers);
958 }
959}
960
962{
963 CustomData olddata = *data;
964 olddata.layers = (olddata.layers) ?
965 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
966 nullptr;
967 /* The pool is now owned by `olddata` and must not be shared. */
968 data->pool = nullptr;
969
970 const bool had_layer = CustomData_free_layer_named(data, name);
971
972 if (had_layer) {
973 update_data_blocks(bm, &olddata, data);
974 }
975 else {
976 /* Move pool ownership back to BMesh CustomData, no block reallocation. */
977 data->pool = olddata.pool;
978 }
979
980 if (olddata.layers) {
981 MEM_freeN(olddata.layers);
982 }
983
984 return had_layer;
985}
986
987void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
988{
989 CustomData olddata = *data;
990 olddata.layers = (olddata.layers) ?
991 static_cast<CustomDataLayer *>(MEM_dupallocN(olddata.layers)) :
992 nullptr;
993 /* The pool is now owned by `olddata` and must not be shared. */
994 data->pool = nullptr;
995
996 const bool had_layer = CustomData_free_layer(
998 /* Assert because its expensive to realloc - better not do if layer isn't present. */
999 BLI_assert(had_layer != false);
1000 UNUSED_VARS_NDEBUG(had_layer);
1001
1002 update_data_blocks(bm, &olddata, data);
1003 if (olddata.layers) {
1004 MEM_freeN(olddata.layers);
1005 }
1006}
1007
1008void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
1009{
1010 BMIter iter;
1011
1012 if (&bm->vdata == data) {
1013 BMVert *eve;
1014
1015 BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
1016 void *ptr = CustomData_bmesh_get_n(data, eve->head.data, eCustomDataType(type), src_n);
1018 }
1019 }
1020 else if (&bm->edata == data) {
1021 BMEdge *eed;
1022
1023 BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
1024 void *ptr = CustomData_bmesh_get_n(data, eed->head.data, eCustomDataType(type), src_n);
1026 }
1027 }
1028 else if (&bm->pdata == data) {
1029 BMFace *efa;
1030
1031 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
1032 void *ptr = CustomData_bmesh_get_n(data, efa->head.data, eCustomDataType(type), src_n);
1034 }
1035 }
1036 else if (&bm->ldata == data) {
1037 BMIter liter;
1038 BMFace *efa;
1039 BMLoop *l;
1040
1041 BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
1042 BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) {
1043 void *ptr = CustomData_bmesh_get_n(data, l->head.data, eCustomDataType(type), src_n);
1044 CustomData_bmesh_set_n(data, l->head.data, eCustomDataType(type), dst_n, ptr);
1045 }
1046 }
1047 }
1048 else {
1049 /* should never reach this! */
1050 BLI_assert(0);
1051 }
1052}
1053
1054float BM_elem_float_data_get(CustomData *cd, void *element, int type)
1055{
1056 const float *f = static_cast<const float *>(
1058 return f ? *f : 0.0f;
1059}
1060
1061void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
1062{
1063 float *f = static_cast<float *>(
1065 if (f) {
1066 *f = val;
1067 }
1068}
1069
1070/* -------------------------------------------------------------------- */
1088
1090 /* same for all groups */
1091 int type;
1093 const float *loop_weights;
1095
1096 /* --- Per loop fan vars --- */
1097
1098 /* reference for this contiguous fan */
1099 const void *data_ref;
1101
1102 /* accumulate 'LoopGroupCD.weight' to make unit length */
1104
1105 /* both arrays the size of the 'BM_vert_face_count(v)'
1106 * each contiguous fan gets a slide of these arrays */
1110};
1111
1112/* Store vars to pass into 'CustomData_bmesh_interp' */
1114 /* direct customdata pointer array */
1115 void **data;
1116 /* weights (aligned with 'data') */
1118 /* index-in-face */
1120 /* number of loops in the fan */
1122};
1123
1125{
1126 const int i = BM_elem_index_get(l);
1127 const float w = lwc->loop_weights[i];
1130 lwc->data_index_array[lwc->data_len] = i;
1131 lwc->weight_array[lwc->data_len] = w;
1132 lwc->weight_accum += w;
1133
1134 lwc->data_len += 1;
1135}
1136
1142static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
1143{
1144 int i;
1145
1147 lwc->data_ref,
1148 BM_ELEM_CD_GET_VOID_P(l_walk, lwc->cd_layer_offset)));
1150
1151 bm_loop_walk_add(lwc, l_walk);
1152
1153 /* recurse around this loop-fan (in both directions) */
1154 for (i = 0; i < 2; i++) {
1155 BMLoop *l_other = ((i == 0) ? l_walk : l_walk->prev)->radial_next;
1156 if (l_other->radial_next != l_other) {
1157 if (l_other->v != l_walk->v) {
1158 l_other = l_other->next;
1159 }
1160 BLI_assert(l_other->v == l_walk->v);
1163 lwc->data_ref,
1164 BM_ELEM_CD_GET_VOID_P(l_other, lwc->cd_layer_offset)))
1165 {
1166 bm_loop_walk_data(lwc, l_other);
1167 }
1168 }
1169 }
1170 }
1171}
1172
1174 BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
1175{
1176 LoopWalkCtx lwc;
1177 LinkNode *groups = nullptr;
1178 BMLoop *l;
1179 BMIter liter;
1180 int loop_num;
1181
1182 lwc.type = bm->ldata.layers[layer_n].type;
1183 lwc.cd_layer_offset = bm->ldata.layers[layer_n].offset;
1184 lwc.loop_weights = loop_weights;
1185 lwc.arena = arena;
1186
1187 /* Enable 'BM_ELEM_INTERNAL_TAG', leaving the flag clean on completion. */
1188 loop_num = 0;
1189 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1191 BM_elem_index_set(l, loop_num); /* set_dirty! */
1192 loop_num++;
1193 }
1194 bm->elem_index_dirty |= BM_LOOP;
1195
1196 lwc.data_len = 0;
1197 lwc.data_array = static_cast<void **>(BLI_memarena_alloc(lwc.arena, sizeof(void *) * loop_num));
1198 lwc.data_index_array = static_cast<int *>(BLI_memarena_alloc(lwc.arena, sizeof(int) * loop_num));
1199 lwc.weight_array = static_cast<float *>(BLI_memarena_alloc(lwc.arena, sizeof(float) * loop_num));
1200
1201 BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
1203 LoopGroupCD *lf = static_cast<LoopGroupCD *>(BLI_memarena_alloc(lwc.arena, sizeof(*lf)));
1204 int len_prev = lwc.data_len;
1205
1207
1208 /* assign len-last */
1209 lf->data = &lwc.data_array[lwc.data_len];
1210 lf->data_index = &lwc.data_index_array[lwc.data_len];
1211 lf->data_weights = &lwc.weight_array[lwc.data_len];
1212 lwc.weight_accum = 0.0f;
1213
1214 /* new group */
1215 bm_loop_walk_data(&lwc, l);
1216 lf->data_len = lwc.data_len - len_prev;
1217
1218 if (LIKELY(lwc.weight_accum != 0.0f)) {
1219 mul_vn_fl(lf->data_weights, lf->data_len, 1.0f / lwc.weight_accum);
1220 }
1221 else {
1222 copy_vn_fl(lf->data_weights, lf->data_len, 1.0f / float(lf->data_len));
1223 }
1224
1225 BLI_linklist_prepend_arena(&groups, lf, lwc.arena);
1226 }
1227 }
1228
1229 BLI_assert(lwc.data_len == loop_num);
1230
1231 return groups;
1232}
1233
1235 void *lf_p,
1236 int layer_n,
1237 void *data_tmp)
1238{
1239 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1240 const int type = bm->ldata.layers[layer_n].type;
1241 int i;
1242 const float *data_weights;
1243
1244 data_weights = lf->data_weights;
1245
1247 &bm->ldata, (const void **)lf->data, data_weights, nullptr, lf->data_len, data_tmp, layer_n);
1248
1249 for (i = 0; i < lf->data_len; i++) {
1250 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1251 }
1252}
1253
1255 BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
1256{
1257 LoopGroupCD *lf = static_cast<LoopGroupCD *>(lf_p);
1258 const int type = bm->ldata.layers[layer_n].type;
1259 int i;
1260 const float *data_weights;
1261
1262 /* re-weight */
1263 float *temp_weights = static_cast<float *>(BLI_array_alloca(temp_weights, lf->data_len));
1264 float weight_accum = 0.0f;
1265
1266 for (i = 0; i < lf->data_len; i++) {
1267 float w = loop_weights[lf->data_index[i]] * lf->data_weights[i];
1268 temp_weights[i] = w;
1269 weight_accum += w;
1270 }
1271
1272 if (LIKELY(weight_accum != 0.0f)) {
1273 mul_vn_fl(temp_weights, lf->data_len, 1.0f / weight_accum);
1274 data_weights = temp_weights;
1275 }
1276 else {
1277 data_weights = lf->data_weights;
1278 }
1279
1281 &bm->ldata, (const void **)lf->data, data_weights, nullptr, lf->data_len, data_tmp, layer_n);
1282
1283 for (i = 0; i < lf->data_len; i++) {
1284 CustomData_copy_elements(eCustomDataType(type), data_tmp, lf->data[i], 1);
1285 }
1286}
1287
1288void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
1289{
1290 const int type = bm->ldata.layers[layer_n].type;
1291 const int size = CustomData_sizeof(eCustomDataType(type));
1292 void *data_tmp = alloca(size);
1293
1294 do {
1295 bm_vert_loop_groups_data_layer_merge__single(bm, groups->link, layer_n, data_tmp);
1296 } while ((groups = groups->next));
1297}
1298
1300 LinkNode *groups,
1301 const int layer_n,
1302 const float *loop_weights)
1303{
1304 const int type = bm->ldata.layers[layer_n].type;
1305 const int size = CustomData_sizeof(eCustomDataType(type));
1306 void *data_tmp = alloca(size);
1307
1308 do {
1310 bm, groups->link, layer_n, data_tmp, loop_weights);
1311 } while ((groups = groups->next));
1312}
1313
Generic geometry attributes built on CustomData.
blender::StringRef BKE_uv_map_pin_name_get(blender::StringRef uv_map_name, char *buffer)
blender::StringRef BKE_uv_map_edge_select_name_get(blender::StringRef uv_map_name, char *buffer)
blender::StringRef BKE_uv_map_vert_select_name_get(blender::StringRef uv_map_name, char *buffer)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
int CustomData_get_offset(const CustomData *data, eCustomDataType type)
int CustomData_get_layer_index_n(const CustomData *data, eCustomDataType type, int n)
@ CD_SET_DEFAULT
void CustomData_copy_elements(eCustomDataType type, void *src_data_ofs, void *dst_data_ofs, int count)
void CustomData_bmesh_free_block(CustomData *data, void **block)
void * CustomData_add_layer_named(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem, blender::StringRef name)
BMCustomDataCopyMap CustomData_bmesh_copy_map_calc(const CustomData &src, const CustomData &dst, eCustomDataMask mask_exclude=0)
void CustomData_bmesh_init_pool(CustomData *data, int totelem, char htype)
void * CustomData_bmesh_get_n(const CustomData *data, void *block, eCustomDataType type, int n)
bool CustomData_free_layer(CustomData *data, eCustomDataType type, int index)
int CustomData_get_named_layer_index(const CustomData *data, eCustomDataType type, blender::StringRef name)
const char * CustomData_get_layer_name(const CustomData *data, eCustomDataType type, int n)
void * CustomData_bmesh_get(const CustomData *data, void *block, eCustomDataType type)
void CustomData_bmesh_interp(CustomData *data, const void **src_blocks, const float *weights, const float *sub_weights, int count, void *dst_block)
void CustomData_bmesh_set_n(CustomData *data, void *block, eCustomDataType type, int n, const void *source)
void CustomData_bmesh_copy_block(CustomData &data, void *src_block, void **dst_block)
void * CustomData_add_layer(CustomData *data, eCustomDataType type, eCDAllocType alloctype, int totelem)
bool CustomData_data_equals(eCustomDataType type, const void *data1, const void *data2)
int CustomData_number_of_layers(const CustomData *data, eCustomDataType type)
bool CustomData_free_layer_named(CustomData *data, blender::StringRef name)
bool CustomData_free_layer_active(CustomData *data, eCustomDataType type)
void CustomData_bmesh_interp_n(CustomData *data, const void **src_blocks, const float *weights, const float *sub_weights, int count, void *dst_block_ofs, int n)
void old_mdisps_bilinear(float out[3], float(*disps)[3], int st, float u, float v)
Definition multires.cc:1266
#define BLI_array_alloca(arr, realsize)
Definition BLI_alloca.h:18
#define BLI_assert(a)
Definition BLI_assert.h:46
int isect_point_quad_v2(const float p[2], const float v1[2], const float v2[2], const float v3[2], const float v4[2])
float normal_quad_v3(float n[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
Definition math_geom.cc:58
void resolve_quad_uv_v2(float r_uv[2], const float st[2], const float st0[2], const float st1[2], const float st2[2], const float st3[2])
void axis_dominant_v3_to_m3(float r_mat[3][3], const float normal[3])
Normal to x,y matrix.
void interp_weights_poly_v2(float w[], float v[][2], int n, const float co[2])
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
void mid_v3_v3v3v3v3(float v[3], const float v1[3], const float v2[3], const float v3[3], const float v4[3])
void mul_vn_fl(float *array_tar, int size, float f)
MINLINE void sub_v2_v2(float r[2], const float a[2])
MINLINE void sub_v3_v3(float r[3], const float a[3])
MINLINE bool equals_v3v3(const float v1[3], const float v2[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void mul_v2_fl(float r[2], float f)
MINLINE void mul_v3_fl(float r[3], float f)
MINLINE void copy_v3_v3(float r[3], const float a[3])
void copy_vn_fl(float *array_tar, int size, float val)
void project_v3_v3v3(float out[3], const float p[3], const float v_proj[3])
MINLINE float dot_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RESULT
void interp_v3_v3v3(float r[3], const float a[3], const float b[3], float t)
MINLINE void add_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void cross_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE float cross_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE bool is_zero_v3(const float v[3]) ATTR_WARN_UNUSED_RESULT
void mid_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void madd_v3_v3v3fl(float r[3], const float a[3], const float b[3], float f)
MINLINE void mul_v3_v3fl(float r[3], const float a[3], float f)
MINLINE void add_v3_v3(float r[3], const float a[3])
MINLINE float normalize_v3(float n[3])
void * BLI_memarena_alloc(MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
void BLI_mempool_destroy(BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_task_parallel_range(int start, int stop, void *userdata, TaskParallelRangeFunc func, const TaskParallelSettings *settings)
Definition task_range.cc:99
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)
Definition BLI_task.h:221
#define UNUSED_FUNCTION(x)
#define UNUSED_VARS_NDEBUG(...)
#define UNLIKELY(x)
#define LIKELY(x)
@ CD_PROP_FLOAT2
Read Guarded memory(de)allocation.
#define BM_FACE_FIRST_LOOP(p)
@ BM_LOOP
@ BM_ELEM_INTERNAL_TAG
#define BM_ELEM_CD_GET_VOID_P(ele, offset)
#define BM_elem_index_get(ele)
#define BM_elem_flag_disable(ele, hflag)
#define BM_elem_index_set(ele, index)
#define BM_elem_flag_test(ele, hflag)
#define BM_elem_flag_enable(ele, hflag)
void BM_face_interp_multires(BMesh *bm, BMFace *f_dst, const BMFace *f_src)
void BM_uv_map_attr_edge_select_ensure(BMesh *bm, const StringRef uv_map_name)
static int compute_mdisp_quad(const BMLoop *l, const float l_f_center[3], float v1[3], float v2[3], float v3[3], float v4[3], float e1[3], float e2[3])
Multires Interpolation.
bool BM_data_layer_free_named(BMesh *bm, CustomData *data, StringRef name)
static void bm_loop_walk_add(LoopWalkCtx *lwc, BMLoop *l)
void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
void BM_data_layer_free(BMesh *bm, CustomData *data, int type)
float BM_elem_float_data_get(CustomData *cd, void *element, int type)
void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
static void loop_interp_multires_cb(void *__restrict userdata, const int ix, const TaskParallelTLS *__restrict)
void BM_uv_map_attr_select_and_pin_ensure(BMesh *bm)
bool BM_uv_map_attr_pin_exists(const BMesh *bm, const StringRef uv_map_name)
bool BM_uv_map_attr_vert_select_exists(const BMesh *bm, const StringRef uv_map_name)
void BM_face_interp_multires_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
void BM_data_layer_copy(BMesh *bm, CustomData *data, int type, int src_n, int dst_n)
void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, const int layer_n)
static void bm_vert_loop_groups_data_layer_merge__single(BMesh *bm, void *lf_p, int layer_n, void *data_tmp)
void BM_vert_loop_groups_data_layer_merge_weights(BMesh *bm, LinkNode *groups, const int layer_n, const float *loop_weights)
void BM_data_layer_add_named(BMesh *bm, CustomData *data, int type, const StringRef name)
void BM_face_interp_from_face_ex(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex, const void **blocks_l, const void **blocks_v, float(*cos_2d)[2], float axis_mat[3][3])
Data Interpolate From Face.
static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
static bool mdisp_in_mdispquad(BMLoop *l_src, BMLoop *l_dst, const float l_dst_f_center[3], const float p[3], int res, float r_axis_x[3], float r_axis_y[3], float r_uv[2])
void BM_loop_interp_multires(BMesh *bm, BMLoop *l_dst, const BMFace *f_src)
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
void BM_data_interp_from_edges(BMesh *bm, const BMEdge *e_src_1, const BMEdge *e_src_2, BMEdge *e_dst, const float fac)
Data, Interpolate From Edges.
void BM_face_interp_from_face(BMesh *bm, BMFace *f_dst, const BMFace *f_src, const bool do_vertex)
void BM_face_multires_bounds_smooth(BMesh *bm, BMFace *f)
static void bm_loop_walk_data(LoopWalkCtx *lwc, BMLoop *l_walk)
void BM_loop_interp_from_face(BMesh *bm, BMLoop *l_dst, const BMFace *f_src, const bool do_vertex, const bool do_multires)
void BM_data_interp_face_vert_edge(BMesh *bm, const BMVert *v_src_1, const BMVert *, BMVert *v, BMEdge *e, const float fac)
Data Face-Vert Edge Interpolate.
void BM_uv_map_attr_pin_ensure(BMesh *bm, const StringRef uv_map_name)
static bool quad_co(const float v1[3], const float v2[3], const float v3[3], const float v4[3], const float p[3], const float n[3], float r_uv[2])
static void mdisp_axis_from_quad(const float v1[3], const float v2[3], float[3], const float v4[3], float r_axis_x[3], float r_axis_y[3])
LinkNode * BM_vert_loop_groups_data_layer_create(BMesh *bm, BMVert *v, const int layer_n, const float *loop_weights, MemArena *arena)
static void bm_vert_loop_groups_data_layer_merge_weights__single(BMesh *bm, void *lf_p, const int layer_n, void *data_tmp, const float *loop_weights)
static float bm_loop_flip_equotion(float mat[2][2], float b[2], const float target_axis_x[3], const float target_axis_y[3], const float coord[3], int i, int j)
void BM_loop_interp_multires_ex(BMesh *, BMLoop *l_dst, const BMFace *f_src, const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
static void UNUSED_FUNCTION BM_Data_Vert_Average(BMesh *, BMFace *)
Data Vert Average.
void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float val)
void BM_data_layer_ensure_named(BMesh *bm, CustomData *data, int type, const StringRef name)
static void bm_loop_flip_disp(const float source_axis_x[3], const float source_axis_y[3], const float target_axis_x[3], const float target_axis_y[3], float disp[3])
static void bm_data_interp_from_elem(CustomData *data_layer, const BMElem *ele_src_1, const BMElem *ele_src_2, BMElem *ele_dst, const float fac)
void BM_uv_map_attr_vert_select_ensure(BMesh *bm, const StringRef uv_map_name)
void BM_data_interp_from_verts(BMesh *bm, const BMVert *v_src_1, const BMVert *v_src_2, BMVert *v_dst, const float fac)
Data, Interpolate From Verts.
#define BM_ITER_ELEM(ele, iter, data, itype)
#define BM_ITER_MESH(ele, iter, bm, itype)
@ BM_EDGES_OF_MESH
@ BM_VERTS_OF_MESH
@ BM_FACES_OF_MESH
@ BM_LOOPS_OF_VERT
@ BM_LOOPS_OF_FACE
BMesh const char void * data
BMesh * bm
#define BM_FACE
#define BM_EDGE
#define BM_VERT
ATTR_WARN_UNUSED_RESULT const void * element
void poly_rotate_plane(const float normal[3], float(*verts)[3], const uint nverts)
POLY ROTATE PLANE.
void BM_face_calc_center_median(const BMFace *f, float r_cent[3])
void BM_vert_normal_update_all(BMVert *v)
bool BM_face_is_normal_valid(const BMFace *f)
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMVert * v
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition btQuadWord.h:119
#define fabsf(x)
#define sqrt
#define MAX_CUSTOMDATA_LAYER_NAME
void * MEM_callocN(size_t len, const char *str)
Definition mallocn.cc:118
void * MEM_dupallocN(const void *vmemh)
Definition mallocn.cc:143
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
const btScalar eps
Definition poly34.cpp:11
BMHeader head
BMHeader head
BMHeader head
float no[3]
void * data
BMHeader head
struct BMVert * v
struct BMLoop * radial_next
struct BMLoop * prev
struct BMFace * f
struct BMLoop * next
float co[3]
float no[3]
BMHeader head
struct BLI_mempool * pool
CustomDataLayer * layers
void * link
struct LinkNode * next
float * data_weights
const float * loop_weights
float * weight_array
const void * data_ref
int * data_index_array
void ** data_array
MemArena * arena
float(* disps)[3]
i
Definition text_draw.cc:230
PointerRNA * ptr
Definition wm_files.cc:4226