Blender  V2.93
bmesh_polygon_edgenet.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16 
23 // #define DEBUG_PRINT
24 
25 #include "MEM_guardedalloc.h"
26 
27 #include "BLI_alloca.h"
28 #include "BLI_array.h"
29 #include "BLI_kdopbvh.h"
30 #include "BLI_linklist_stack.h"
31 #include "BLI_math.h"
32 #include "BLI_memarena.h"
33 #include "BLI_sort_utils.h"
34 #include "BLI_utildefines_stack.h"
35 
36 #include "BKE_customdata.h"
37 
38 #include "bmesh.h"
39 #include "intern/bmesh_private.h"
40 
41 /* -------------------------------------------------------------------- */
52 /* Note: All these flags _must_ be cleared on exit */
53 
54 /* face is a part of the edge-net (including the original face we're splitting) */
55 #define FACE_NET _FLAG_WALK
56 /* edge is a part of the edge-net we're filling */
57 #define EDGE_NET _FLAG_WALK
 59  /* tag verts we've visited */
59 #define VERT_VISIT _FLAG_WALK
60 #define VERT_IN_QUEUE _FLAG_WALK_ALT
61 
62 struct VertOrder {
63  float angle;
65 };
66 
68 {
69  uint count = 0;
70  BMLoop *l;
71 
72  if ((l = e->l)) {
73  do {
75  count++;
76  }
77  } while ((l = l->radial_next) != e->l);
78  }
79  return count;
80 }
81 
83 {
84  BMLoop *l;
85 
86  if ((l = e->l)) {
87  do {
89  return l;
90  }
91  } while ((l = l->radial_next) != e->l);
92  }
93  return NULL;
94 }
95 
/**
 * Compute the vector from \a v2 to \a v1, project it into 2D space
 * using \a axis_mat, and normalize the result into \a out.
 */
static void normalize_v2_m3_v3v3(float out[2],
                                 const float axis_mat[3][3],
                                 const float v1[3],
                                 const float v2[3])
{
  float delta[3];
  /* 3D direction between the two vertices. */
  sub_v3_v3v3(delta, v1, v2);
  /* Drop to 2D via the dominant-axis matrix, then normalize. */
  mul_v2_m3v3(out, axis_mat, delta);
  normalize_v2(out);
}
106 
112  const float face_normal[3],
113  const float face_normal_matrix[3][3],
114  BMEdge *e_pair[2])
115 {
116  /* Always find one boundary edge (to determine winding)
117  * and one wire (if available), otherwise another boundary.
118  */
119 
120  /* detect winding */
121  BMLoop *l_walk;
122  bool swap;
123 
124  BLI_SMALLSTACK_DECLARE(edges_boundary, BMEdge *);
125  BLI_SMALLSTACK_DECLARE(edges_wire, BMEdge *);
126  int edges_boundary_len = 0;
127  int edges_wire_len = 0;
128 
129  {
130  BMEdge *e, *e_first;
131  e = e_first = v_init->e;
132  do {
135  if (count == 1) {
136  BLI_SMALLSTACK_PUSH(edges_boundary, e);
137  edges_boundary_len++;
138  }
139  else if (count == 0) {
140  BLI_SMALLSTACK_PUSH(edges_wire, e);
141  edges_wire_len++;
142  }
143  }
144  } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
145  }
146 
147  /* first edge should always be boundary */
148  if (edges_boundary_len == 0) {
149  return false;
150  }
151  e_pair[0] = BLI_SMALLSTACK_POP(edges_boundary);
152 
153  /* use to hold boundary OR wire edges */
154  BLI_SMALLSTACK_DECLARE(edges_search, BMEdge *);
155 
156  /* attempt one boundary and one wire, or 2 boundary */
157  if (edges_wire_len == 0) {
158  if (edges_boundary_len > 1) {
159  e_pair[1] = BLI_SMALLSTACK_POP(edges_boundary);
160 
161  if (edges_boundary_len > 2) {
162  BLI_SMALLSTACK_SWAP(edges_search, edges_boundary);
163  }
164  }
165  else {
166  /* one boundary and no wire */
167  return false;
168  }
169  }
170  else {
171  e_pair[1] = BLI_SMALLSTACK_POP(edges_wire);
172  if (edges_wire_len > 1) {
173  BLI_SMALLSTACK_SWAP(edges_search, edges_wire);
174  }
175  }
176 
177  /* if we swapped above, search this list for the best edge */
178  if (!BLI_SMALLSTACK_IS_EMPTY(edges_search)) {
179  /* find the best edge in 'edge_list' to use for 'e_pair[1]' */
180  const BMVert *v_prev = BM_edge_other_vert(e_pair[0], v_init);
181  const BMVert *v_next = BM_edge_other_vert(e_pair[1], v_init);
182 
183  float dir_prev[2], dir_next[2];
184 
185  normalize_v2_m3_v3v3(dir_prev, face_normal_matrix, v_prev->co, v_init->co);
186  normalize_v2_m3_v3v3(dir_next, face_normal_matrix, v_next->co, v_init->co);
187  float angle_best_cos = dot_v2v2(dir_next, dir_prev);
188 
189  BMEdge *e;
190  while ((e = BLI_SMALLSTACK_POP(edges_search))) {
191  v_next = BM_edge_other_vert(e, v_init);
192  float dir_test[2];
193 
194  normalize_v2_m3_v3v3(dir_test, face_normal_matrix, v_next->co, v_init->co);
195  const float angle_test_cos = dot_v2v2(dir_prev, dir_test);
196 
197  if (angle_test_cos > angle_best_cos) {
198  angle_best_cos = angle_test_cos;
199  e_pair[1] = e;
200  }
201  }
202  }
203 
204  /* flip based on winding */
205  l_walk = bm_edge_flagged_radial_first(e_pair[0]);
206  swap = false;
207  if (face_normal == l_walk->f->no) {
208  swap = !swap;
209  }
210  if (l_walk->v != v_init) {
211  swap = !swap;
212  }
213  if (swap) {
214  SWAP(BMEdge *, e_pair[0], e_pair[1]);
215  }
216 
217  return true;
218 }
219 
229 {
230  int edges_boundary_len = 0;
231  int edges_wire_len = 0;
232 
233  {
234  BMEdge *e, *e_first;
235  e = e_first = v_init->e;
236  do {
239  if (count == 1) {
240  edges_boundary_len++;
241  }
242  else if (count == 0) {
243  edges_wire_len++;
244  }
245  }
246  } while ((e = BM_DISK_EDGE_NEXT(e, v_init)) != e_first);
247  }
248 
249  /* first edge should always be boundary */
250  if (edges_boundary_len == 0) {
251  return false;
252  }
253 
254  /* attempt one boundary and one wire, or 2 boundary */
255  if (edges_wire_len == 0) {
256  if (edges_boundary_len >= 2) {
257  /* pass */
258  }
259  else {
260  /* one boundary and no wire */
261  return false;
262  }
263  }
264  else {
265  /* pass */
266  }
267 
268  return true;
269 }
270 
272  const float face_normal[3],
273  /* cache to avoid realloc every time */
274  struct VertOrder *edge_order,
275  const uint edge_order_len,
276  BMEdge *e_pair[2])
277 {
278  /* fast-path for the common case (avoid push-pop).
279  * Also avoids tagging as visited since we know we
280  * can't reach these verts some other way */
281 #define USE_FASTPATH_NOFORK
282 
283  BMVert *v;
284  BMVert *v_dst;
285  bool found = false;
286 
287  struct VertOrder *eo;
288  STACK_DECLARE(edge_order);
289 
290  /* store visited verts so we can clear the visit flag after execution */
291  BLI_SMALLSTACK_DECLARE(vert_visit, BMVert *);
292 
293  /* likely this will stay very small
294  * all verts pushed into this stack _must_ have their previous edges set! */
295  BLI_SMALLSTACK_DECLARE(vert_stack, BMVert *);
296  BLI_SMALLSTACK_DECLARE(vert_stack_next, BMVert *);
297 
298  STACK_INIT(edge_order, edge_order_len);
299 
300  /* start stepping */
301  v = BM_edge_other_vert(e_pair[0], v_init);
302  v->e = e_pair[0];
303  BLI_SMALLSTACK_PUSH(vert_stack, v);
304 
305  v_dst = BM_edge_other_vert(e_pair[1], v_init);
306 
307 #ifdef DEBUG_PRINT
308  printf("%s: vert (search) %d\n", __func__, BM_elem_index_get(v_init));
309 #endif
310 
311  /* This loop will keep stepping over the best possible edge,
312  * in most cases it finds the direct route to close the face.
313  *
314  * In cases where paths can't be closed,
315  * alternatives are stored in the 'vert_stack'.
316  */
317  while ((v = BLI_SMALLSTACK_POP_EX(vert_stack, vert_stack_next))) {
318 #ifdef USE_FASTPATH_NOFORK
319  walk_nofork:
320 #else
321  BLI_SMALLSTACK_PUSH(vert_visit, v);
323 #endif
324 
325  BLI_assert(STACK_SIZE(edge_order) == 0);
326 
327  /* check if we're done! */
328  if (v == v_dst) {
329  found = true;
330  goto finally;
331  }
332 
333  BMEdge *e_next, *e_first;
334  e_first = v->e;
335  e_next = BM_DISK_EDGE_NEXT(e_first, v); /* always skip this verts edge */
336 
337  /* in rare cases there may be edges with a single connecting vertex */
338  if (e_next != e_first) {
339  do {
340  if ((BM_ELEM_API_FLAG_TEST(e_next, EDGE_NET)) &&
341  (bm_edge_flagged_radial_count(e_next) < 2)) {
342  BMVert *v_next;
343 
344  v_next = BM_edge_other_vert(e_next, v);
345  BLI_assert(v->e != e_next);
346 
347 #ifdef DEBUG_PRINT
348  /* indent and print */
349  {
350  BMVert *_v = v;
351  do {
352  printf(" ");
353  } while ((_v = BM_edge_other_vert(_v->e, _v)) != v_init);
354  printf("vert %d -> %d (add=%d)\n",
356  BM_elem_index_get(v_next),
357  BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT) == 0);
358  }
359 #endif
360 
361  if (!BM_ELEM_API_FLAG_TEST(v_next, VERT_VISIT)) {
362  eo = STACK_PUSH_RET_PTR(edge_order);
363  eo->v = v_next;
364 
365  v_next->e = e_next;
366  }
367  }
368  } while ((e_next = BM_DISK_EDGE_NEXT(e_next, v)) != e_first);
369  }
370 
371 #ifdef USE_FASTPATH_NOFORK
372  if (STACK_SIZE(edge_order) == 1) {
373  eo = STACK_POP_PTR(edge_order);
374  v = eo->v;
375 
376  goto walk_nofork;
377  }
378 #endif
379 
380  /* sort by angle if needed */
381  if (STACK_SIZE(edge_order) > 1) {
382  uint j;
383  BMVert *v_prev = BM_edge_other_vert(v->e, v);
384 
385  for (j = 0; j < STACK_SIZE(edge_order); j++) {
386  edge_order[j].angle = angle_signed_on_axis_v3v3v3_v3(
387  v_prev->co, v->co, edge_order[j].v->co, face_normal);
388  }
389  qsort(edge_order,
390  STACK_SIZE(edge_order),
391  sizeof(struct VertOrder),
393 
394 #ifdef USE_FASTPATH_NOFORK
395  /* only tag forks */
396  BLI_SMALLSTACK_PUSH(vert_visit, v);
398 #endif
399  }
400 
401  while ((eo = STACK_POP_PTR(edge_order))) {
402  BLI_SMALLSTACK_PUSH(vert_stack_next, eo->v);
403  }
404 
405  if (!BLI_SMALLSTACK_IS_EMPTY(vert_stack_next)) {
406  BLI_SMALLSTACK_SWAP(vert_stack, vert_stack_next);
407  }
408  }
409 
410 finally:
411  /* clear flag for next execution */
412  while ((v = BLI_SMALLSTACK_POP(vert_visit))) {
414  }
415 
416  return found;
417 
418 #undef USE_FASTPATH_NOFORK
419 }
420 
422  const float face_normal[3],
423  const float face_normal_matrix[3][3],
424  /* cache to avoid realloc every time */
425  struct VertOrder *edge_order,
426  const uint edge_order_len,
427  BMVert **r_face_verts,
428  int *r_face_verts_len)
429 {
430  BMEdge *e_pair[2];
431  BMVert *v;
432 
433  if (!bm_face_split_edgenet_find_loop_pair(v_init, face_normal, face_normal_matrix, e_pair)) {
434  return false;
435  }
436 
437  BLI_assert((bm_edge_flagged_radial_count(e_pair[0]) == 1) ||
438  (bm_edge_flagged_radial_count(e_pair[1]) == 1));
439 
441  v_init, face_normal, edge_order, edge_order_len, e_pair)) {
442  uint i = 0;
443 
444  r_face_verts[i++] = v_init;
445  v = BM_edge_other_vert(e_pair[1], v_init);
446  do {
447  r_face_verts[i++] = v;
448  } while ((v = BM_edge_other_vert(v->e, v)) != v_init);
449  *r_face_verts_len = i;
450  return (i > 2) ? true : false;
451  }
452  return false;
453 }
454 
464  BMFace *f,
465  BMEdge **edge_net,
466  const int edge_net_len,
467  BMFace ***r_face_arr,
468  int *r_face_arr_len)
469 {
470  /* re-use for new face verts */
471  BMVert **face_verts;
472  int face_verts_len;
473 
474  BMFace **face_arr = NULL;
475  BLI_array_declare(face_arr);
476 
477  BMVert **vert_queue;
478  STACK_DECLARE(vert_queue);
479  int i;
480 
481  struct VertOrder *edge_order;
482  const uint edge_order_len = edge_net_len + 2;
483 
484  BMVert *v;
485 
486  BMLoop *l_iter, *l_first;
487 
488  if (!edge_net_len) {
489  if (r_face_arr) {
490  *r_face_arr = NULL;
491  *r_face_arr_len = 0;
492  }
493  return false;
494  }
495 
496  /* These arrays used to be stack memory, however they can be
497  * large for single faces with complex edge-nets, see: T65980. */
498 
499  /* over-alloc (probably 2-4 is only used in most cases), for the biggest-fan */
500  edge_order = MEM_mallocN(sizeof(*edge_order) * edge_order_len, __func__);
501 
502  /* use later */
503  face_verts = MEM_mallocN(sizeof(*face_verts) * (edge_net_len + f->len), __func__);
504 
505  vert_queue = MEM_mallocN(sizeof(vert_queue) * (edge_net_len + f->len), __func__);
506  STACK_INIT(vert_queue, f->len + edge_net_len);
507 
510 
511 #ifdef DEBUG
512  for (i = 0; i < edge_net_len; i++) {
513  BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
514  BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
515  }
516  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
517  do {
518  BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter->e, EDGE_NET) == 0);
519  } while ((l_iter = l_iter->next) != l_first);
520 #endif
521 
522  /* Note: 'VERT_IN_QUEUE' is often not needed at all,
523  * however in rare cases verts are added multiple times to the queue,
524  * that on its own is harmless but in _very_ rare cases,
525  * the queue will overflow its maximum size,
526  * so we better be strict about this! see: T51539 */
527 
528  for (i = 0; i < edge_net_len; i++) {
529  BM_ELEM_API_FLAG_ENABLE(edge_net[i], EDGE_NET);
532  }
533  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
534  do {
537  } while ((l_iter = l_iter->next) != l_first);
538 
539  float face_normal_matrix[3][3];
540  axis_dominant_v3_to_m3(face_normal_matrix, f->no);
541 
542  /* any vert can be used to begin with */
543  STACK_PUSH(vert_queue, l_first->v);
545 
546  while ((v = STACK_POP(vert_queue))) {
549  f->no,
550  face_normal_matrix,
551  edge_order,
552  edge_order_len,
553  face_verts,
554  &face_verts_len)) {
555  BMFace *f_new;
556 
557  f_new = BM_face_create_verts(bm, face_verts, face_verts_len, f, BM_CREATE_NOP, false);
558 
559  for (i = 0; i < edge_net_len; i++) {
561  }
562 
563  if (f_new) {
564  BLI_array_append(face_arr, f_new);
565  copy_v3_v3(f_new->no, f->no);
566 
567  /* warning, normally don't do this,
568  * its needed for mesh intersection - which tracks face-sides based on selection */
569  f_new->head.hflag = f->head.hflag;
570  if (f->head.hflag & BM_ELEM_SELECT) {
571  bm->totfacesel++;
572  }
573 
575 
576  /* add new verts to keep finding loops for
577  * (verts between boundary and manifold edges) */
578  l_iter = l_first = BM_FACE_FIRST_LOOP(f_new);
579  do {
580  /* Avoid adding to queue multiple times (not common but happens). */
581  if (!BM_ELEM_API_FLAG_TEST(l_iter->v, VERT_IN_QUEUE) &&
583  STACK_PUSH(vert_queue, l_iter->v);
585  }
586  } while ((l_iter = l_iter->next) != l_first);
587  }
588  }
589  }
590 
591  if (CustomData_has_math(&bm->ldata)) {
592  /* reuse VERT_VISIT here to tag vert's already interpolated */
593  BMIter iter;
594  BMLoop *l_other;
595 
596  /* see: #BM_loop_interp_from_face for similar logic */
597  void **blocks = BLI_array_alloca(blocks, f->len);
598  float(*cos_2d)[2] = BLI_array_alloca(cos_2d, f->len);
599  float *w = BLI_array_alloca(w, f->len);
600  float axis_mat[3][3];
601  float co[2];
602 
603  /* interior loops */
604  axis_dominant_v3_to_m3(axis_mat, f->no);
605 
606  /* first simply copy from existing face */
607  i = 0;
608  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
609  do {
610  BM_ITER_ELEM (l_other, &iter, l_iter->v, BM_LOOPS_OF_VERT) {
611  if ((l_other->f != f) && BM_ELEM_API_FLAG_TEST(l_other->f, FACE_NET)) {
613  &bm->ldata, &bm->ldata, l_iter->head.data, &l_other->head.data);
614  }
615  }
616  /* tag not to interpolate */
618 
619  mul_v2_m3v3(cos_2d[i], axis_mat, l_iter->v->co);
620  blocks[i] = l_iter->head.data;
621 
622  } while ((void)i++, (l_iter = l_iter->next) != l_first);
623 
624  for (i = 0; i < edge_net_len; i++) {
625  BM_ITER_ELEM (v, &iter, edge_net[i], BM_VERTS_OF_EDGE) {
627  BMIter liter;
628 
630 
631  /* interpolate this loop, then copy to the rest */
632  l_first = NULL;
633 
634  BM_ITER_ELEM (l_iter, &liter, v, BM_LOOPS_OF_VERT) {
635  if (BM_ELEM_API_FLAG_TEST(l_iter->f, FACE_NET)) {
636  if (l_first == NULL) {
637  mul_v2_m3v3(co, axis_mat, v->co);
638  interp_weights_poly_v2(w, cos_2d, f->len, co);
640  &bm->ldata, (const void **)blocks, w, NULL, f->len, l_iter->head.data);
641  l_first = l_iter;
642  }
643  else {
645  &bm->ldata, &bm->ldata, l_first->head.data, &l_iter->head.data);
646  }
647  }
648  }
649  }
650  }
651  }
652  }
653 
654  /* cleanup */
655  for (i = 0; i < edge_net_len; i++) {
656  BM_ELEM_API_FLAG_DISABLE(edge_net[i], EDGE_NET);
657  /* from interp only */
658  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v1, VERT_VISIT);
659  BM_ELEM_API_FLAG_DISABLE(edge_net[i]->v2, VERT_VISIT);
660  }
661  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
662  do {
664  /* from interp only */
666  } while ((l_iter = l_iter->next) != l_first);
667 
668  if (BLI_array_len(face_arr)) {
669  bmesh_face_swap_data(f, face_arr[0]);
670  BM_face_kill(bm, face_arr[0]);
671  face_arr[0] = f;
672  }
673  else {
675  }
676 
677  for (i = 0; i < BLI_array_len(face_arr); i++) {
678  BM_ELEM_API_FLAG_DISABLE(face_arr[i], FACE_NET);
679  }
680 
681  if (r_face_arr) {
682  *r_face_arr = face_arr;
683  *r_face_arr_len = BLI_array_len(face_arr);
684  }
685  else {
686  if (face_arr) {
687  MEM_freeN(face_arr);
688  }
689  }
690 
691  MEM_freeN(edge_order);
692  MEM_freeN(face_verts);
693  MEM_freeN(vert_queue);
694 
695  return true;
696 }
697 
698 #undef FACE_NET
699 #undef VERT_VISIT
700 #undef EDGE_NET
701 
704 /* -------------------------------------------------------------------- */
718 #define USE_PARTIAL_CONNECT
719 
720 #define VERT_IS_VALID BM_ELEM_INTERNAL_TAG
721 
722 /* can be X or Y */
723 #define SORT_AXIS 0
724 
726  const BMVert *v_a,
727  const BMVert *v_b,
728  float r_isect[2])
729 {
730  /* This bias seems like it could be too large,
731  * mostly its not needed, see T52329 for example where it is. */
732  const float endpoint_bias = 1e-4f;
733  return ((isect_seg_seg_v2_point_ex(
734  v_a->co, v_b->co, e->v1->co, e->v2->co, endpoint_bias, r_isect) == 1) &&
735  ((e->v1 != v_a) && (e->v2 != v_a) && (e->v1 != v_b) && (e->v2 != v_b)));
736 }
737 
738 BLI_INLINE int axis_pt_cmp(const float pt_a[2], const float pt_b[2])
739 {
740  if (pt_a[0] < pt_b[0]) {
741  return -1;
742  }
743  if (pt_a[0] > pt_b[0]) {
744  return 1;
745  }
746  if (pt_a[1] < pt_b[1]) {
747  return -1;
748  }
749  if (pt_a[1] > pt_b[1]) {
750  return 1;
751  }
752  return 0;
753 }
754 
761  LinkNode edge_links; /* keep first */
763 
764  /* Set the following vars once we have >1 groups */
765 
766  /* when an edge in a previous group connects to this one,
767  * so there's no need to create one pointing back. */
769 
770  /* verts in the group which has the lowest & highest values,
771  * the lower vertex is connected to the first edge */
772  struct {
774  /* used for sorting only */
775  float min_axis[2];
776  float max_axis[2];
778 };
779 
780 static int group_min_cmp_fn(const void *p1, const void *p2)
781 {
782  const struct EdgeGroupIsland *g1 = *(struct EdgeGroupIsland **)p1;
783  const struct EdgeGroupIsland *g2 = *(struct EdgeGroupIsland **)p2;
784  /* min->co[SORT_AXIS] hasn't been applied yet */
785  int test = axis_pt_cmp(g1->vert_span.min_axis, g2->vert_span.min_axis);
786  if (UNLIKELY(test == 0)) {
788  }
789  return test;
790 }
791 
793  float dist_orig;
795 
798 
799  const uint *vert_range;
800 };
801 
804 
806 
807  const uint *vert_range;
808 };
809 
811  int index,
812  const BVHTreeRay *UNUSED(ray),
813  BVHTreeRayHit *hit)
814 {
816  const BMEdge *e = data->edge_arr[index];
817  const int v1_index = BM_elem_index_get(e->v1);
818  float co_isect[2];
819 
820  if (edge_isect_verts_point_2d(e, data->v_origin, data->v_other, co_isect)) {
821  const float t = line_point_factor_v2(co_isect, data->v_origin->co, data->v_other->co);
822  const float dist_new = data->dist_orig * t;
823  /* avoid float precision issues, possible this is greater,
824  * check above zero to allow some overlap
825  * (and needed for partial-connect which will overlap vertices) */
826  if (LIKELY((dist_new < hit->dist) && (dist_new > 0.0f))) {
827  /* v1/v2 will both be in the same group */
828  if (v1_index < (int)data->vert_range[0] || v1_index >= (int)data->vert_range[1]) {
829  hit->dist = dist_new;
830  hit->index = index;
831  }
832  }
833  }
834 }
835 
837  int index,
838  const BVHTreeRay *ray,
839  BVHTreeRayHit *hit)
840 {
842  const BMEdge *e = data->edge_arr[index];
843 
844  /* direction is normalized, so this will be the distance */
845  float dist_new;
846  if (isect_ray_seg_v2(
847  data->v_origin->co, ray->direction, e->v1->co, e->v2->co, &dist_new, NULL)) {
848  /* avoid float precision issues, possible this is greater,
849  * check above zero to allow some overlap
850  * (and needed for partial-connect which will overlap vertices) */
851  if (LIKELY(dist_new < hit->dist && (dist_new > 0.0f))) {
852  if (e->v1 != data->v_origin && e->v2 != data->v_origin) {
853  const int v1_index = BM_elem_index_get(e->v1);
854  /* v1/v2 will both be in the same group */
855  if (v1_index < (int)data->vert_range[0] || v1_index >= (int)data->vert_range[1]) {
856  hit->dist = dist_new;
857  hit->index = index;
858  }
859  }
860  }
861  }
862 }
863 
874 
877 
878  const uint *vert_range;
879 };
880 
882  BMVert *v_origin,
883  BMVert *v_other)
884 {
885  int index;
886 
887  BVHTreeRayHit hit = {0};
888  float dir[3];
889 
890  sub_v2_v2v2(dir, v_other->co, v_origin->co);
891  dir[2] = 0.0f;
892  hit.index = -1;
893  hit.dist = normalize_v2(dir);
894 
896  user_data.dist_orig = hit.dist;
897  user_data.edge_arr = args->edge_arr;
898  user_data.v_origin = v_origin;
899  user_data.v_other = v_other;
900  user_data.vert_range = args->vert_range;
901 
902  index = BLI_bvhtree_ray_cast_ex(args->bvhtree,
903  v_origin->co,
904  dir,
905  0.0f,
906  &hit,
908  &user_data,
909  0);
910 
911  BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
912 
913  /* check existing connections (no spatial optimization here since we're continually adding). */
914  if (LIKELY(index == -1)) {
915  float t_best = 1.0f;
916  for (uint i = 0; i < args->edge_arr_new_len; i++) {
917  float co_isect[2];
918  if (UNLIKELY(
919  edge_isect_verts_point_2d(args->edge_arr_new[i], v_origin, v_other, co_isect))) {
920  const float t_test = line_point_factor_v2(co_isect, v_origin->co, v_other->co);
921  if (t_test < t_best) {
922  t_best = t_test;
923 
924  e_hit = args->edge_arr_new[i];
925  }
926  }
927  }
928  }
929 
930  return e_hit;
931 }
932 
938  BMVert *v_origin,
939  const float dir[3])
940 {
941  int index;
942  BVHTreeRayHit hit = {0};
943 
944  BLI_ASSERT_UNIT_V2(dir);
945 
946  hit.index = -1;
948 
950  user_data.edge_arr = args->edge_arr;
951  user_data.v_origin = v_origin;
952  user_data.vert_range = args->vert_range;
953 
954  index = BLI_bvhtree_ray_cast_ex(args->bvhtree,
955  v_origin->co,
956  dir,
957  0.0f,
958  &hit,
960  &user_data,
961  0);
962 
963  BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;
964 
965  /* check existing connections (no spatial optimization here since we're continually adding). */
966  if (LIKELY(index != -1)) {
967  for (uint i = 0; i < args->edge_arr_new_len; i++) {
968  BMEdge *e = args->edge_arr_new[i];
969  float dist_new;
970  if (isect_ray_seg_v2(v_origin->co, dir, e->v1->co, e->v2->co, &dist_new, NULL)) {
971  if (e->v1 != v_origin && e->v2 != v_origin) {
972  /* avoid float precision issues, possible this is greater */
973  if (LIKELY(dist_new < hit.dist)) {
974  hit.dist = dist_new;
975 
976  e_hit = args->edge_arr_new[i];
977  }
978  }
979  }
980  }
981  }
982 
983  return e_hit;
984 }
985 
987  BMVert *v_origin,
988  /* false = negative, true = positive */
989  bool direction_sign)
990 {
1005  const float dir[3] = {[SORT_AXIS] = direction_sign ? 1.0f : -1.0f};
1006  BMEdge *e_hit = test_edges_isect_2d_ray(args, v_origin, dir);
1007  BMVert *v_other = NULL;
1008 
1009  if (e_hit) {
1010  BMVert *v_other_fallback = NULL;
1011 
1012  BLI_SMALLSTACK_DECLARE(vert_search, BMVert *);
1013 
1014  /* ensure we never add verts multiple times (not all that likely - but possible) */
1015  BLI_SMALLSTACK_DECLARE(vert_blacklist, BMVert *);
1016 
1017  do {
1018  BMVert *v_pair[2];
1019  /* ensure the closest vertex is popped back off the stack first */
1020  if (len_squared_v2v2(v_origin->co, e_hit->v1->co) >
1021  len_squared_v2v2(v_origin->co, e_hit->v2->co)) {
1022  ARRAY_SET_ITEMS(v_pair, e_hit->v1, e_hit->v2);
1023  }
1024  else {
1025  ARRAY_SET_ITEMS(v_pair, e_hit->v2, e_hit->v1);
1026  }
1027 
1028  for (int j = 0; j < 2; j++) {
1029  BMVert *v_iter = v_pair[j];
1030  if (BM_elem_flag_test(v_iter, VERT_IS_VALID)) {
1031  if (direction_sign ? (v_iter->co[SORT_AXIS] > v_origin->co[SORT_AXIS]) :
1032  (v_iter->co[SORT_AXIS] < v_origin->co[SORT_AXIS])) {
1033  BLI_SMALLSTACK_PUSH(vert_search, v_iter);
1034  BLI_SMALLSTACK_PUSH(vert_blacklist, v_iter);
1036  }
1037  }
1038  }
1039  v_other_fallback = v_other;
1040 
1041  } while ((v_other = BLI_SMALLSTACK_POP(vert_search)) &&
1042  (e_hit = test_edges_isect_2d_vert(args, v_origin, v_other)));
1043 
1044  if (v_other == NULL) {
1045  printf("Using fallback\n");
1046  v_other = v_other_fallback;
1047  }
1048 
1049  /* reset the blacklist flag, for future use */
1050  BMVert *v;
1051  while ((v = BLI_SMALLSTACK_POP(vert_blacklist))) {
1053  }
1054  }
1055 
1056  /* if we reach this line, v_other is either the best vertex or its NULL */
1057  return v_other ? BM_elem_index_get(v_other) : -1;
1058 }
1059 
1064 #ifdef USE_PARTIAL_CONNECT
1065 
1070 static bool test_tagged_and_notface(BMEdge *e, void *fptr)
1071 {
1072  BMFace *f = (BMFace *)fptr;
1074 }
1075 
1084 {
1085  /* -------------------------------------------------------------------- */
1086  /* Initial check that we may be a delimiting vert (keep this fast) */
1087 
1088  /* initial check - see if we have 3+ flagged edges attached to 'v_delimit'
1089  * if not, we can early exit */
1090  LinkNode *e_delimit_list = NULL;
1091  uint e_delimit_list_len = 0;
1092 
1093 # define EDGE_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1094 # define VERT_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1095 
1096 # define FOREACH_VERT_EDGE(v_, e_, body_) \
1097  { \
1098  BMEdge *e_ = v_->e; \
1099  do { \
1100  body_ \
1101  } while ((e_ = BM_DISK_EDGE_NEXT(e_, v_)) != v_->e); \
1102  } \
1103  ((void)0)
1104 
1105  /* start with face edges, since we need to split away wire-only edges */
1106  BMEdge *e_face_init = NULL;
1107 
1108  FOREACH_VERT_EDGE(v_delimit, e_iter, {
1109  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1111  BLI_linklist_prepend_alloca(&e_delimit_list, e_iter);
1112  e_delimit_list_len++;
1113  if (e_iter->l != NULL && BM_edge_in_face(e_iter, f)) {
1114  e_face_init = e_iter;
1115  }
1116  }
1117  });
1118 
1119  /* skip typical edge-chain verts */
1120  if (LIKELY(e_delimit_list_len <= 2)) {
1121  return NULL;
1122  }
1123 
1124  /* -------------------------------------------------------------------- */
1125  /* Complicated stuff starts now! */
1126 
1127  /* Store connected vertices for restoring the flag */
1128  LinkNode *vert_stack = NULL;
1129  BLI_linklist_prepend_alloca(&vert_stack, v_delimit);
1131 
1132  /* Walk the net... */
1133  {
1134  BLI_SMALLSTACK_DECLARE(search, BMVert *);
1135  BMVert *v_other = BM_edge_other_vert(e_face_init ? e_face_init : v_delimit->e, v_delimit);
1136 
1137  BLI_SMALLSTACK_PUSH(search, v_other);
1139 
1140  while ((v_other = BLI_SMALLSTACK_POP(search))) {
1141  BLI_assert(BM_elem_flag_test(v_other, VERT_NOT_IN_STACK) == false);
1142  BLI_linklist_prepend_alloca(&vert_stack, v_other);
1143  BMEdge *e_iter = v_other->e;
1144  do {
1145  BMVert *v_step = BM_edge_other_vert(e_iter, v_other);
1146  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1147  if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK)) {
1149  BLI_SMALLSTACK_PUSH(search, v_step);
1150  }
1151  }
1152  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_other)) != v_other->e);
1153  }
1154  }
1155 
1156  /* Detect if this is a delimiter
1157  * by checking if we didn't walk any of edges connected to 'v_delimit'. */
1158  bool is_delimit = false;
1159  FOREACH_VERT_EDGE(v_delimit, e_iter, {
1160  BMVert *v_step = BM_edge_other_vert(e_iter, v_delimit);
1161  if (BM_elem_flag_test(v_step, VERT_NOT_IN_STACK) && !BM_edge_in_face(e_iter, f)) {
1162  is_delimit = true; /* if one vertex is valid - we have a mix */
1163  }
1164  else {
1165  /* match the vertex flag (only for edges around 'v_delimit') */
1167  }
1168  });
1169 
1170 # undef FOREACH_VERT_EDGE
1171 
1172  /* Execute the split */
1173  BMVert *v_split = NULL;
1174  if (is_delimit) {
1175  v_split = BM_vert_create(bm, v_delimit->co, NULL, 0);
1176  BM_vert_separate_tested_edges(bm, v_split, v_delimit, test_tagged_and_notface, f);
1178 
1179  BLI_assert(v_delimit->e != NULL);
1180 
1181  /* Degenerate, avoid eternal loop, see: T59074. */
1182 # if 0
1183  BLI_assert(v_split->e != NULL);
1184 # else
1185  if (UNLIKELY(v_split->e == NULL)) {
1186  BM_vert_kill(bm, v_split);
1187  v_split = NULL;
1188  }
1189 # endif
1190  }
1191 
1192  /* Restore flags */
1193  do {
1195  } while ((vert_stack = vert_stack->next));
1196 
1197  do {
1198  BM_elem_flag_enable((BMEdge *)e_delimit_list->link, EDGE_NOT_IN_STACK);
1199  } while ((e_delimit_list = e_delimit_list->next));
1200 
1201 # undef EDGE_NOT_IN_STACK
1202 # undef VERT_NOT_IN_STACK
1203 
1204  return v_split;
1205 }
1206 
1210 static bool bm_vert_partial_connect_check_overlap(const int *remap,
1211  const int v_a_index,
1212  const int v_b_index)
1213 {
1214  /* connected to eachother */
1215  if (UNLIKELY((remap[v_a_index] == v_b_index) || (remap[v_b_index] == v_a_index))) {
1216  return true;
1217  }
1218  return false;
1219 }
1220 
1221 #endif /* USE_PARTIAL_CONNECT */
1222 
1232  BMFace *f,
1233  BMEdge **edge_net_init,
1234  const uint edge_net_init_len,
1235  bool use_partial_connect,
1237  BMEdge ***r_edge_net_new,
1238  uint *r_edge_net_new_len)
1239 {
1240  /* -------------------------------------------------------------------- */
1253  const uint edge_arr_len = (uint)edge_net_init_len + (uint)f->len;
1254  BMEdge **edge_arr = BLI_memarena_alloc(mem_arena, sizeof(*edge_arr) * edge_arr_len);
1255  bool ok = false;
1256  uint edge_net_new_len = (uint)edge_net_init_len;
1257 
1258  memcpy(edge_arr, edge_net_init, sizeof(*edge_arr) * (size_t)edge_net_init_len);
1259 
1260  /* _must_ clear on exit */
1261 #define EDGE_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1262 #define VERT_NOT_IN_STACK BM_ELEM_INTERNAL_TAG
1263 
1264  {
1265  uint i = edge_net_init_len;
1266  BMLoop *l_iter, *l_first;
1267  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
1268  do {
1271  edge_arr[i++] = l_iter->e;
1272  } while ((l_iter = l_iter->next) != l_first);
1273  BLI_assert(i == edge_arr_len);
1274  }
1275 
1276  for (uint i = 0; i < edge_arr_len; i++) {
1280  }
1281 
1282 #ifdef USE_PARTIAL_CONNECT
1283  /* Split-out delimiting vertices */
1284  struct TempVertPair {
1285  struct TempVertPair *next;
1286  BMVert *v_temp;
1287  BMVert *v_orig;
1288  };
1289 
1290  struct {
1291  struct TempVertPair *list;
1292  uint len;
1293  int *remap; /* temp -> orig mapping */
1294  } temp_vert_pairs = {NULL};
1295 
1296  if (use_partial_connect) {
1297  for (uint i = 0; i < edge_net_init_len; i++) {
1298  for (uint j = 0; j < 2; j++) {
1299  BMVert *v_delimit = (&edge_arr[i]->v1)[j];
1300  BMVert *v_other;
1301 
1302  /* note, remapping will _never_ map a vertex to an already mapped vertex */
1303  while (UNLIKELY((v_other = bm_face_split_edgenet_partial_connect(bm, v_delimit, f)))) {
1304  struct TempVertPair *tvp = BLI_memarena_alloc(mem_arena, sizeof(*tvp));
1305  tvp->next = temp_vert_pairs.list;
1306  tvp->v_orig = v_delimit;
1307  tvp->v_temp = v_other;
1308  temp_vert_pairs.list = tvp;
1309  temp_vert_pairs.len++;
1310  }
1311  }
1312  }
1313 
1314  if (temp_vert_pairs.len == 0) {
1315  use_partial_connect = false;
1316  }
1317  }
1318 #endif /* USE_PARTIAL_CONNECT */
1319 
1320  uint group_arr_len = 0;
1321  LinkNode *group_head = NULL;
1322  {
1323  /* scan 'edge_arr' backwards so the outer face boundary is handled first
1324  * (since its likely to be the largest) */
1325  uint edge_index = (edge_arr_len - 1);
1326  uint edge_in_group_tot = 0;
1327 
1328  BLI_SMALLSTACK_DECLARE(vstack, BMVert *);
1329 
1330  while (true) {
1331  LinkNode *edge_links = NULL;
1332  uint unique_verts_in_group = 0, unique_edges_in_group = 0;
1333 
1334  /* list of groups */
1335  BLI_assert(BM_elem_flag_test(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK));
1336  BLI_SMALLSTACK_PUSH(vstack, edge_arr[edge_index]->v1);
1337  BM_elem_flag_disable(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK);
1338 
1339  BMVert *v_iter;
1340  while ((v_iter = BLI_SMALLSTACK_POP(vstack))) {
1341  unique_verts_in_group++;
1342 
1343  BMEdge *e_iter = v_iter->e;
1344  do {
1345  if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
1347  unique_edges_in_group++;
1348 
1349  BLI_linklist_prepend_arena(&edge_links, e_iter, mem_arena);
1350 
1351  BMVert *v_other = BM_edge_other_vert(e_iter, v_iter);
1352  if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
1353  BLI_SMALLSTACK_PUSH(vstack, v_other);
1355  }
1356  }
1357  } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_iter)) != v_iter->e);
1358  }
1359 
1360  struct EdgeGroupIsland *g = BLI_memarena_alloc(mem_arena, sizeof(*g));
1361  g->vert_len = unique_verts_in_group;
1362  g->edge_len = unique_edges_in_group;
1363  edge_in_group_tot += unique_edges_in_group;
1364 
1365  BLI_linklist_prepend_nlink(&group_head, edge_links, (LinkNode *)g);
1366 
1367  group_arr_len++;
1368 
1369  if (edge_in_group_tot == edge_arr_len) {
1370  break;
1371  }
1372 
1373  /* skip edges in the stack */
1374  while (BM_elem_flag_test(edge_arr[edge_index], EDGE_NOT_IN_STACK) == false) {
1375  BLI_assert(edge_index != 0);
1376  edge_index--;
1377  }
1378  }
1379  }
1380 
1381  /* single group - no holes */
1382  if (group_arr_len == 1) {
1383  goto finally;
1384  }
1385 
1386  /* -------------------------------------------------------------------- */
1387  /* Previous checks need to be kept fast, since they will run very often,
1388  * now we know there are holes, so calculate a spatial lookup info and
1389  * other per-group data.
1390  */
1391 
1392  float axis_mat[3][3];
1393  axis_dominant_v3_to_m3(axis_mat, f->no);
1394 
1395 #define VERT_IN_ARRAY BM_ELEM_INTERNAL_TAG
1396 
1397  struct EdgeGroupIsland **group_arr = BLI_memarena_alloc(mem_arena,
1398  sizeof(*group_arr) * group_arr_len);
1399  uint vert_arr_len = 0;
1400  /* sort groups by lowest value vertex */
1401  {
 1402  /* fill 'group_arr' in reverse order so the boundary face is first */
1403  struct EdgeGroupIsland **group_arr_p = &group_arr[group_arr_len];
1404 
1405  for (struct EdgeGroupIsland *g = (void *)group_head; g;
1406  g = (struct EdgeGroupIsland *)g->edge_links.next) {
1407  LinkNode *edge_links = g->edge_links.link;
1408 
1409  /* init with *any* different verts */
1410  g->vert_span.min = ((BMEdge *)edge_links->link)->v1;
1411  g->vert_span.max = ((BMEdge *)edge_links->link)->v2;
1412  float min_axis[2] = {FLT_MAX, FLT_MAX};
1413  float max_axis[2] = {-FLT_MAX, -FLT_MAX};
1414 
1415  do {
1416  BMEdge *e = edge_links->link;
1417  BLI_assert(e->head.htype == BM_EDGE);
1418 
1419  for (int j = 0; j < 2; j++) {
1420  BMVert *v_iter = (&e->v1)[j];
1421  BLI_assert(v_iter->head.htype == BM_VERT);
1422  /* ideally we could use 'v_iter->co[SORT_AXIS]' here,
1423  * but we need to sort the groups before setting the vertex array order */
1424  const float axis_value[2] = {
1425 #if SORT_AXIS == 0
1426  dot_m3_v3_row_x(axis_mat, v_iter->co),
1427  dot_m3_v3_row_y(axis_mat, v_iter->co),
1428 #else
1429  dot_m3_v3_row_y(axis_mat, v_iter->co),
1430  dot_m3_v3_row_x(axis_mat, v_iter->co),
1431 #endif
1432  };
1433 
1434  if (axis_pt_cmp(axis_value, min_axis) == -1) {
1435  g->vert_span.min = v_iter;
1436  copy_v2_v2(min_axis, axis_value);
1437  }
1438  if (axis_pt_cmp(axis_value, max_axis) == 1) {
1439  g->vert_span.max = v_iter;
1440  copy_v2_v2(max_axis, axis_value);
1441  }
1442  }
1443  } while ((edge_links = edge_links->next));
1444 
1445  copy_v2_v2(g->vert_span.min_axis, min_axis);
1446  copy_v2_v2(g->vert_span.max_axis, max_axis);
1447 
1448  g->has_prev_edge = false;
1449 
1450  vert_arr_len += g->vert_len;
1451 
1452  *(--group_arr_p) = g;
1453  }
1454  }
1455 
1456  qsort(group_arr, group_arr_len, sizeof(*group_arr), group_min_cmp_fn);
1457 
1458  /* we don't know how many unique verts there are connecting the edges, so over-alloc */
1459  BMVert **vert_arr = BLI_memarena_alloc(mem_arena, sizeof(*vert_arr) * vert_arr_len);
1460  /* map vertex -> group index */
1461  uint *verts_group_table = BLI_memarena_alloc(mem_arena,
1462  sizeof(*verts_group_table) * vert_arr_len);
1463 
1464  float(*vert_coords_backup)[3] = BLI_memarena_alloc(mem_arena,
1465  sizeof(*vert_coords_backup) * vert_arr_len);
1466 
1467  {
1468  /* relative location, for higher precision calculations */
1469  const float f_co_ref[3] = {UNPACK3(BM_FACE_FIRST_LOOP(f)->v->co)};
1470 
1471  int v_index = 0; /* global vert index */
1472  for (uint g_index = 0; g_index < group_arr_len; g_index++) {
1473  LinkNode *edge_links = group_arr[g_index]->edge_links.link;
1474  do {
1475  BMEdge *e = edge_links->link;
1476  for (int j = 0; j < 2; j++) {
1477  BMVert *v_iter = (&e->v1)[j];
1478  if (!BM_elem_flag_test(v_iter, VERT_IN_ARRAY)) {
1480 
1481  /* not nice, but alternatives aren't much better :S */
1482  {
1483  copy_v3_v3(vert_coords_backup[v_index], v_iter->co);
1484 
1485  /* for higher precision */
1486  sub_v3_v3(v_iter->co, f_co_ref);
1487 
1488  float co_2d[2];
1489  mul_v2_m3v3(co_2d, axis_mat, v_iter->co);
1490  v_iter->co[0] = co_2d[0];
1491  v_iter->co[1] = co_2d[1];
1492  v_iter->co[2] = 0.0f;
1493  }
1494 
1495  BM_elem_index_set(v_iter, v_index); /* set_dirty */
1496 
1497  vert_arr[v_index] = v_iter;
1498  verts_group_table[v_index] = g_index;
1499  v_index++;
1500  }
1501  }
1502  } while ((edge_links = edge_links->next));
1503  }
1504  }
1505 
1507 
1508  /* Now create bvh tree
1509  *
1510  * Note that a large epsilon is used because meshes with dimensions of around 100+ need it.
1511  * see T52329. */
1512  BVHTree *bvhtree = BLI_bvhtree_new(edge_arr_len, 1e-4f, 8, 8);
1513  for (uint i = 0; i < edge_arr_len; i++) {
1514  const float e_cos[2][3] = {
1515  {UNPACK2(edge_arr[i]->v1->co), 0.0f},
1516  {UNPACK2(edge_arr[i]->v2->co), 0.0f},
1517  };
1518  BLI_bvhtree_insert(bvhtree, i, (const float *)e_cos, 2);
1519  }
1520  BLI_bvhtree_balance(bvhtree);
1521 
1522 #ifdef USE_PARTIAL_CONNECT
1523  if (use_partial_connect) {
1524  /* needs to be done once the vertex indices have been written into */
1525  temp_vert_pairs.remap = BLI_memarena_alloc(mem_arena,
1526  sizeof(*temp_vert_pairs.remap) * vert_arr_len);
1527  copy_vn_i(temp_vert_pairs.remap, vert_arr_len, -1);
1528 
1529  struct TempVertPair *tvp = temp_vert_pairs.list;
1530  do {
1531  temp_vert_pairs.remap[BM_elem_index_get(tvp->v_temp)] = BM_elem_index_get(tvp->v_orig);
1532  } while ((tvp = tvp->next));
1533  }
1534 #endif /* USE_PARTIAL_CONNECT */
1535 
1536  /* Create connections between groups */
1537 
1538  /* may be an over-alloc, but not by much */
1539  edge_net_new_len = (uint)edge_net_init_len + ((group_arr_len - 1) * 2);
1540  BMEdge **edge_net_new = BLI_memarena_alloc(mem_arena, sizeof(*edge_net_new) * edge_net_new_len);
1541  memcpy(edge_net_new, edge_net_init, sizeof(*edge_net_new) * (size_t)edge_net_init_len);
1542 
1543  {
1544  uint edge_net_new_index = edge_net_init_len;
1545  /* start-end of the verts in the current group */
1546 
1547  uint vert_range[2];
1548 
1549  vert_range[0] = 0;
1550  vert_range[1] = group_arr[0]->vert_len;
1551 
1552  struct EdgeGroup_FindConnection_Args args = {
1553  .bvhtree = bvhtree,
1554 
1555  /* use the new edge array so we can scan edges which have been added */
1556  .edge_arr = edge_arr,
1557  .edge_arr_len = edge_arr_len,
1558 
1559  /* we only want to check newly created edges */
1560  .edge_arr_new = edge_net_new + edge_net_init_len,
1561  .edge_arr_new_len = 0,
1562 
1563  .vert_range = vert_range,
1564  };
1565 
1566  for (uint g_index = 1; g_index < group_arr_len; g_index++) {
1567  struct EdgeGroupIsland *g = group_arr[g_index];
1568 
 1569  /* The range of verts this group uses in 'vert_arr' (not including the last index). */
1570  vert_range[0] = vert_range[1];
1571  vert_range[1] += g->vert_len;
1572 
1573  if (g->has_prev_edge == false) {
1574  BMVert *v_origin = g->vert_span.min;
1575 
1576  const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, false);
1577  // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1578 
1579  /* only for degenerate geometry */
1580  if (index_other != -1) {
1581 #ifdef USE_PARTIAL_CONNECT
1582  if ((use_partial_connect == false) ||
1584  temp_vert_pairs.remap, BM_elem_index_get(v_origin), index_other) == false))
1585 #endif
1586  {
1587  BMVert *v_end = vert_arr[index_other];
1588 
1589  edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1590 #ifdef USE_PARTIAL_CONNECT
1591  BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1592 #endif
1593  edge_net_new_index++;
1594  args.edge_arr_new_len++;
1595  }
1596  }
1597  }
1598 
1599  {
1600  BMVert *v_origin = g->vert_span.max;
1601 
1602  const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, true);
1603  // BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);
1604 
1605  /* only for degenerate geometry */
1606  if (index_other != -1) {
1607 #ifdef USE_PARTIAL_CONNECT
1608  if ((use_partial_connect == false) ||
1610  temp_vert_pairs.remap, BM_elem_index_get(v_origin), index_other) == false))
1611 #endif
1612  {
1613  BMVert *v_end = vert_arr[index_other];
1614  edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
1615 #ifdef USE_PARTIAL_CONNECT
1616  BM_elem_index_set(edge_net_new[edge_net_new_index], edge_net_new_index);
1617 #endif
1618  edge_net_new_index++;
1619  args.edge_arr_new_len++;
1620  }
1621 
1622  /* tell the 'next' group it doesn't need to create its own back-link */
1623  uint g_index_other = verts_group_table[index_other];
1624  group_arr[g_index_other]->has_prev_edge = true;
1625  }
1626  }
1627  }
1628  BLI_assert(edge_net_new_len >= edge_net_new_index);
1629  edge_net_new_len = edge_net_new_index;
1630  }
1631 
1632  BLI_bvhtree_free(bvhtree);
1633 
1634  *r_edge_net_new = edge_net_new;
1635  *r_edge_net_new_len = edge_net_new_len;
1636  ok = true;
1637 
1638  for (uint i = 0; i < vert_arr_len; i++) {
1639  copy_v3_v3(vert_arr[i]->co, vert_coords_backup[i]);
1640  }
1641 
1642 finally:
1643 
1644 #ifdef USE_PARTIAL_CONNECT
1645  /* don't free 'vert_temp_pair_list', its part of the arena */
1646  if (use_partial_connect) {
1647 
1648  /* Sanity check: ensure we don't have connecting edges before splicing begins. */
1649 # ifdef DEBUG
1650  {
1651  struct TempVertPair *tvp = temp_vert_pairs.list;
1652  do {
1653  /* We must _never_ create connections here
1654  * (in case the islands can't have a connection at all). */
1655  BLI_assert(BM_edge_exists(tvp->v_orig, tvp->v_temp) == NULL);
1656  } while ((tvp = tvp->next));
1657  }
1658 # endif
1659 
1660  struct TempVertPair *tvp = temp_vert_pairs.list;
1661  do {
 1662  /* it's _very_ unlikely the edge exists,
 1663  * however splicing may cause this. see: T48012 */
1664  if (!BM_edge_exists(tvp->v_orig, tvp->v_temp)) {
1665  BM_vert_splice(bm, tvp->v_orig, tvp->v_temp);
1666  }
1667  } while ((tvp = tvp->next));
1668 
 1669  /* Remove edges which have become doubles since splicing vertices together,
 1670  * it's less trouble than detecting future-doubles on edge-creation. */
1671  for (uint i = edge_net_init_len; i < edge_net_new_len; i++) {
1672  while (BM_edge_find_double(edge_net_new[i])) {
1673  BM_edge_kill(bm, edge_net_new[i]);
1674  edge_net_new_len--;
1675  if (i == edge_net_new_len) {
1676  break;
1677  }
1678  edge_net_new[i] = edge_net_new[edge_net_new_len];
1679  }
1680  }
1681 
1682  *r_edge_net_new_len = edge_net_new_len;
1683  }
1684 #endif
1685 
1686  for (uint i = 0; i < edge_arr_len; i++) {
1688  BM_elem_flag_disable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
1689  BM_elem_flag_disable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
1690  }
1691 
1692 #undef VERT_IN_ARRAY
1693 #undef VERT_NOT_IN_STACK
1694 #undef EDGE_NOT_IN_STACK
1695 
1696  return ok;
1697 }
1698 
1699 #undef SORT_AXIS
1700 
typedef float(TangentPoint)[2]
CustomData interface, see also DNA_customdata_types.h.
bool CustomData_has_math(const struct CustomData *data)
Definition: customdata.c:3843
void CustomData_bmesh_copy_data(const struct CustomData *source, struct CustomData *dest, void *src_block, void **dest_block)
void CustomData_bmesh_interp(struct CustomData *data, const void **src_blocks, const float *weights, const float *sub_weights, int count, void *dst_block)
Definition: customdata.c:4048
#define BLI_array_alloca(arr, realsize)
Definition: BLI_alloca.h:36
A (mainly) macro array library.
#define BLI_array_append(arr, item)
Definition: BLI_array.h:104
#define BLI_array_declare(arr)
Definition: BLI_array.h:62
#define BLI_array_len(arr)
Definition: BLI_array.h:74
#define BLI_assert(a)
Definition: BLI_assert.h:58
#define BLI_INLINE
MINLINE axis_t min_axis(axis_t a, axis_t b)
Definition: BLI_kdopbvh.c:223
#define BVH_RAYCAST_DIST_MAX
Definition: BLI_kdopbvh.h:105
void BLI_bvhtree_balance(BVHTree *tree)
Definition: BLI_kdopbvh.c:956
BVHTree * BLI_bvhtree_new(int maxsize, float epsilon, char tree_type, char axis)
Definition: BLI_kdopbvh.c:873
int BLI_bvhtree_ray_cast_ex(BVHTree *tree, const float co[3], const float dir[3], float radius, BVHTreeRayHit *hit, BVHTree_RayCastCallback callback, void *userdata, int flag)
Definition: BLI_kdopbvh.c:1939
void BLI_bvhtree_free(BVHTree *tree)
Definition: BLI_kdopbvh.c:945
void BLI_bvhtree_insert(BVHTree *tree, int index, const float co[3], int numpoints)
Definition: BLI_kdopbvh.c:998
#define BLI_ASSERT_UNIT_V2(v)
bool isect_ray_seg_v2(const float ray_origin[2], const float ray_direction[2], const float v0[2], const float v1[2], float *r_lambda, float *r_u)
Definition: math_geom.c:2077
float line_point_factor_v2(const float p[2], const float l1[2], const float l2[2])
Definition: math_geom.c:3469
void axis_dominant_v3_to_m3(float r_mat[3][3], const float normal[3])
Normal to x,y matrix.
Definition: math_geom.c:3752
void interp_weights_poly_v2(float w[], float v[][2], const int n, const float co[2])
int isect_seg_seg_v2_point_ex(const float v0[2], const float v1[2], const float v2[2], const float v3[2], const float endpoint_bias, float vi[2])
Definition: math_geom.c:1252
void mul_v2_m3v3(float r[2], const float M[3][3], const float a[3])
Definition: math_matrix.c:921
MINLINE float len_squared_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
void copy_vn_i(int *array_tar, const int size, const int val)
Definition: math_vector.c:1374
MINLINE void sub_v3_v3(float r[3], const float a[3])
MINLINE void sub_v3_v3v3(float r[3], const float a[3], const float b[3])
MINLINE void copy_v2_v2(float r[2], const float a[2])
MINLINE float dot_m3_v3_row_x(const float M[3][3], const float a[3]) ATTR_WARN_UNUSED_RESULT
float angle_signed_on_axis_v3v3v3_v3(const float v1[3], const float v2[3], const float v3[3], const float axis[3]) ATTR_WARN_UNUSED_RESULT
Definition: math_vector.c:587
MINLINE void copy_v3_v3(float r[3], const float a[3])
MINLINE float dot_m3_v3_row_y(const float M[3][3], const float a[3]) ATTR_WARN_UNUSED_RESULT
MINLINE void sub_v2_v2v2(float r[2], const float a[2], const float b[2])
MINLINE float dot_v2v2(const float a[2], const float b[2]) ATTR_WARN_UNUSED_RESULT
MINLINE float normalize_v2(float r[2])
void * BLI_memarena_alloc(struct MemArena *ma, size_t size) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1) ATTR_MALLOC ATTR_ALLOC_SIZE(2)
Definition: BLI_memarena.c:131
int BLI_sortutil_cmp_float_reverse(const void *a_, const void *b_)
Definition: sort_utils.c:54
unsigned int uint
Definition: BLI_sys_types.h:83
#define UNPACK2(a)
#define ARRAY_SET_ITEMS(...)
#define SWAP(type, a, b)
#define UNUSED(x)
#define UNPACK3(a)
#define UNLIKELY(x)
#define LIKELY(x)
#define STACK_POP(stack)
#define STACK_POP_PTR(stack)
#define STACK_PUSH(stack, val)
#define STACK_DECLARE(stack)
#define STACK_INIT(stack, tot)
#define STACK_SIZE(stack)
#define STACK_PUSH_RET_PTR(stack)
void swap(T &a, T &b)
Definition: Common.h:33
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble u2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLdouble GLdouble v2 _GL_VOID_RET _GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLdouble GLdouble nz _GL_VOID_RET _GL_VOID GLfloat GLfloat nz _GL_VOID_RET _GL_VOID GLint GLint nz _GL_VOID_RET _GL_VOID GLshort GLshort nz _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const GLfloat *values _GL_VOID_RET _GL_VOID GLsizei const GLushort *values _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID const GLuint const GLclampf *priorities _GL_VOID_RET _GL_VOID GLdouble y _GL_VOID_RET _GL_VOID GLfloat y _GL_VOID_RET _GL_VOID GLint y _GL_VOID_RET _GL_VOID GLshort y _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLfloat GLfloat z _GL_VOID_RET _GL_VOID GLint GLint z _GL_VOID_RET _GL_VOID GLshort GLshort z _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble w _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat w _GL_VOID_RET _GL_VOID GLint GLint GLint w _GL_VOID_RET _GL_VOID GLshort GLshort GLshort w _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble y2 _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat y2 _GL_VOID_RET _GL_VOID GLint GLint GLint y2 _GL_VOID_RET _GL_VOID GLshort GLshort GLshort y2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLuint *buffer _GL_VOID_RET _GL_VOID GLdouble t _GL_VOID_RET _GL_VOID GLfloat t _GL_VOID_RET _GL_VOID GLint t _GL_VOID_RET _GL_VOID GLshort t _GL_VOID_RET _GL_VOID GLdouble t
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble v1
Read Guarded memory(de)allocation.
#define BM_DISK_EDGE_NEXT(e, v)
Definition: bmesh_class.h:556
@ BM_VERT
Definition: bmesh_class.h:383
@ BM_EDGE
Definition: bmesh_class.h:384
@ BM_ELEM_SELECT
Definition: bmesh_class.h:471
@ BM_ELEM_INTERNAL_TAG
Definition: bmesh_class.h:496
#define BM_FACE_FIRST_LOOP(p)
Definition: bmesh_class.h:553
void bmesh_face_swap_data(BMFace *f_a, BMFace *f_b)
Definition: bmesh_core.c:2952
BMVert * BM_vert_create(BMesh *bm, const float co[3], const BMVert *v_example, const eBMCreateFlag create_flag)
Main function for creating a new vertex.
Definition: bmesh_core.c:58
BMFace * BM_face_create_verts(BMesh *bm, BMVert **vert_arr, const int len, const BMFace *f_example, const eBMCreateFlag create_flag, const bool create_edges)
Definition: bmesh_core.c:500
bool BM_vert_splice(BMesh *bm, BMVert *v_dst, BMVert *v_src)
Splice Vert.
Definition: bmesh_core.c:2284
void BM_vert_kill(BMesh *bm, BMVert *v)
Definition: bmesh_core.c:1002
void BM_face_kill(BMesh *bm, BMFace *f)
Definition: bmesh_core.c:881
void BM_vert_separate_tested_edges(BMesh *UNUSED(bm), BMVert *v_dst, BMVert *v_src, bool(*testfn)(BMEdge *, void *arg), void *arg)
Definition: bmesh_core.c:2561
void BM_edge_kill(BMesh *bm, BMEdge *e)
Definition: bmesh_core.c:987
BMEdge * BM_edge_create(BMesh *bm, BMVert *v1, BMVert *v2, const BMEdge *e_example, const eBMCreateFlag create_flag)
Main function for creating a new edge.
Definition: bmesh_core.c:147
@ BM_CREATE_NOP
Definition: bmesh_core.h:27
#define BM_elem_index_get(ele)
Definition: bmesh_inline.h:124
#define BM_elem_flag_disable(ele, hflag)
Definition: bmesh_inline.h:29
#define BM_elem_index_set(ele, index)
Definition: bmesh_inline.h:125
#define BM_elem_flag_test(ele, hflag)
Definition: bmesh_inline.h:26
#define BM_elem_flag_enable(ele, hflag)
Definition: bmesh_inline.h:28
#define BM_ITER_ELEM(ele, iter, data, itype)
@ BM_VERTS_OF_EDGE
@ BM_LOOPS_OF_VERT
ATTR_WARN_UNUSED_RESULT BMesh * bm
static bool bm_face_split_edgenet_find_loop_pair(BMVert *v_init, const float face_normal[3], const float face_normal_matrix[3][3], BMEdge *e_pair[2])
static void bvhtree_test_edges_isect_2d_vert_cb(void *user_data, int index, const BVHTreeRay *UNUSED(ray), BVHTreeRayHit *hit)
static BMLoop * bm_edge_flagged_radial_first(BMEdge *e)
#define EDGE_NET
static BMEdge * test_edges_isect_2d_vert(const struct EdgeGroup_FindConnection_Args *args, BMVert *v_origin, BMVert *v_other)
static bool bm_face_split_edgenet_find_loop(BMVert *v_init, const float face_normal[3], const float face_normal_matrix[3][3], struct VertOrder *edge_order, const uint edge_order_len, BMVert **r_face_verts, int *r_face_verts_len)
static bool bm_face_split_edgenet_find_loop_walk(BMVert *v_init, const float face_normal[3], struct VertOrder *edge_order, const uint edge_order_len, BMEdge *e_pair[2])
static bool bm_vert_partial_connect_check_overlap(const int *remap, const int v_a_index, const int v_b_index)
#define VERT_NOT_IN_STACK
static BMEdge * test_edges_isect_2d_ray(const struct EdgeGroup_FindConnection_Args *args, BMVert *v_origin, const float dir[3])
bool BM_face_split_edgenet_connect_islands(BMesh *bm, BMFace *f, BMEdge **edge_net_init, const uint edge_net_init_len, bool use_partial_connect, MemArena *mem_arena, BMEdge ***r_edge_net_new, uint *r_edge_net_new_len)
static void bvhtree_test_edges_isect_2d_ray_cb(void *user_data, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit)
static BMVert * bm_face_split_edgenet_partial_connect(BMesh *bm, BMVert *v_delimit, BMFace *f)
#define VERT_IS_VALID
#define SORT_AXIS
static int bm_face_split_edgenet_find_connection(const struct EdgeGroup_FindConnection_Args *args, BMVert *v_origin, bool direction_sign)
#define EDGE_NOT_IN_STACK
#define VERT_IN_QUEUE
#define FOREACH_VERT_EDGE(v_, e_, body_)
bool BM_face_split_edgenet(BMesh *bm, BMFace *f, BMEdge **edge_net, const int edge_net_len, BMFace ***r_face_arr, int *r_face_arr_len)
#define VERT_VISIT
#define FACE_NET
static void normalize_v2_m3_v3v3(float out[2], const float axis_mat[3][3], const float v1[3], const float v2[3])
static bool bm_face_split_edgenet_find_loop_pair_exists(BMVert *v_init)
static bool test_tagged_and_notface(BMEdge *e, void *fptr)
#define VERT_IN_ARRAY
BLI_INLINE bool edge_isect_verts_point_2d(const BMEdge *e, const BMVert *v_a, const BMVert *v_b, float r_isect[2])
static uint bm_edge_flagged_radial_count(BMEdge *e)
static int group_min_cmp_fn(const void *p1, const void *p2)
BLI_INLINE int axis_pt_cmp(const float pt_a[2], const float pt_b[2])
#define BM_ELEM_API_FLAG_DISABLE(element, f)
Definition: bmesh_private.h:76
#define BM_ELEM_API_FLAG_TEST(element, f)
Definition: bmesh_private.h:81
#define BM_ELEM_API_FLAG_ENABLE(element, f)
Definition: bmesh_private.h:71
BMEdge * BM_edge_exists(BMVert *v_a, BMVert *v_b)
Definition: bmesh_query.c:1995
BMEdge * BM_edge_find_double(BMEdge *e)
Definition: bmesh_query.c:2028
bool BM_edge_in_face(const BMEdge *e, const BMFace *f)
Definition: bmesh_query.c:555
BLI_INLINE BMVert * BM_edge_other_vert(BMEdge *e, const BMVert *v) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
ATTR_WARN_UNUSED_RESULT const BMVert * v2
ATTR_WARN_UNUSED_RESULT const BMLoop * l
ATTR_WARN_UNUSED_RESULT const BMVert const BMEdge * e
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE const btScalar & w() const
Return the w value.
Definition: btQuadWord.h:119
void * user_data
int count
static MemArena * mem_arena
Definition: makesdna.c:155
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_mallocN)(size_t len, const char *str)
Definition: mallocn.c:47
static ulong * next
BMVert * v1
Definition: bmesh_class.h:134
BMVert * v2
Definition: bmesh_class.h:134
int len
Definition: bmesh_class.h:279
BMHeader head
Definition: bmesh_class.h:267
float no[3]
Definition: bmesh_class.h:280
char htype
Definition: bmesh_class.h:76
char hflag
Definition: bmesh_class.h:78
void * data
Definition: bmesh_class.h:63
BMHeader head
Definition: bmesh_class.h:157
struct BMVert * v
Definition: bmesh_class.h:165
struct BMEdge * e
Definition: bmesh_class.h:176
struct BMLoop * radial_next
Definition: bmesh_class.h:216
struct BMFace * f
Definition: bmesh_class.h:183
struct BMLoop * next
Definition: bmesh_class.h:245
float co[3]
Definition: bmesh_class.h:99
struct BMEdge * e
Definition: bmesh_class.h:109
BMHeader head
Definition: bmesh_class.h:97
int totfacesel
Definition: bmesh_class.h:298
char elem_index_dirty
Definition: bmesh_class.h:305
CustomData ldata
Definition: bmesh_class.h:337
float direction[3]
Definition: BLI_kdopbvh.h:72
struct EdgeGroupIsland::@156 vert_span
void * link
Definition: BLI_linklist.h:40
struct LinkNode * next
Definition: BLI_linklist.h:39
uint len