Blender V4.5
editmesh_undo.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
8
9#include <algorithm>
10#include <variant>
11
12#include "MEM_guardedalloc.h"
13
14#include "CLG_log.h"
15
16#include "DNA_key_types.h"
17#include "DNA_layer_types.h"
18#include "DNA_mesh_types.h"
19#include "DNA_meshdata_types.h"
20#include "DNA_object_types.h"
21#include "DNA_scene_types.h"
22
23#include "BLI_array_utils.h"
25#include "BLI_listbase.h"
26#include "BLI_math_base.h"
27#include "BLI_string.h"
28#include "BLI_task.hh"
29#include "BLI_vector.hh"
30
31#include "BKE_context.hh"
32#include "BKE_customdata.hh"
33#include "BKE_deform.hh"
34#include "BKE_editmesh.hh"
35#include "BKE_key.hh"
36#include "BKE_layer.hh"
37#include "BKE_lib_id.hh"
38#include "BKE_main.hh"
39#include "BKE_mesh.hh"
40#include "BKE_object.hh"
41#include "BKE_undo_system.hh"
42
43#include "DEG_depsgraph.hh"
44
45#include "ED_mesh.hh"
46#include "ED_object.hh"
47#include "ED_undo.hh"
48#include "ED_util.hh"
49
50#include "WM_api.hh"
51#include "WM_types.hh"
52
53#define USE_ARRAY_STORE
54
55#ifdef USE_ARRAY_STORE
56// # define DEBUG_PRINT
57// # define DEBUG_TIME
58# ifdef DEBUG_TIME
59# include "BLI_time_utildefines.h"
60# endif
61
62# include "BLI_array_store.h"
71# define ARRAY_CHUNK_SIZE_IN_BYTES 65536
72# define ARRAY_CHUNK_NUM_MIN 256
73
74# define USE_ARRAY_STORE_THREAD
75
98# define USE_ARRAY_STORE_RLE
99#endif
100
101#ifdef USE_ARRAY_STORE_THREAD
102# include "BLI_task.h"
103#endif
104
/* Log channel for edit-mesh undo errors (used by CLOG_ERROR in the decode step below). */
106static CLG_LogRef LOG = {"ed.undo.mesh"};
107
108/* -------------------------------------------------------------------- */
111
112#ifdef USE_ARRAY_STORE
113
/**
 * Compute the BArrayStore chunk size (in elements) for arrays of `stride` byte elements.
 *
 * NOTE(review): this is a Doxygen extract of the file; the function's return
 * expression (original source line 119) is folded out of this listing —
 * consult the real `editmesh_undo.cc` before editing.
 */
114static size_t array_chunk_size_calc(const size_t stride)
115{
116 /* Return a chunk size that targets a size in bytes,
117 * this is done so boolean arrays don't add so much overhead and
118 * larger arrays aren't so big as to waste memory, see: #105205. */
120}
121
122/* Single linked list of layers stored per type */
128
129# ifdef USE_ARRAY_STORE_RLE
/* Whether this custom-data layer chain should be RLE-encoded before being
 * handed to the array-store.
 * NOTE(review): Doxygen extract — the signature (original line 130, presumably
 * `static bool um_customdata_layer_use_rle(const BArrayCustomData *bcd)`) and
 * line 135 are folded out of this listing. */
131{
132 /* NOTE(@ideasman42): This could be enabled for all byte sized layers.
133 * for now only use for boolean layers to address: #136737. */
134 if (bcd->type == CD_PROP_BOOL) {
136 return true;
137 }
138 return false;
139}
140# endif
141
142#endif
143
/**
 * One undo state for a single mesh: a full #Mesh copy plus (when
 * USE_ARRAY_STORE is enabled) the de-duplicated array-store states that back
 * its arrays while the step is stored.
 *
 * NOTE(review): Doxygen extract — most members (original lines 145-166) and
 * the fields of the nested `store` struct (lines 171-175) are folded out of
 * this listing; consult the real file for the full definition.
 */
144struct UndoMesh {
150
154
167
168#ifdef USE_ARRAY_STORE
169 /* Null arrays are considered empty. */
170 struct { /* most data is stored as 'custom' data */
176#endif /* USE_ARRAY_STORE */
177
/* Total memory footprint of this step, reported to the undo system. */
178 size_t undo_size;
179};
180
181#ifdef USE_ARRAY_STORE
182
183/* -------------------------------------------------------------------- */
186
/* Indices of the per-domain array-store buckets (vert/edge/loop/poly/...,
 * ending with ARRAY_STORE_INDEX_MSEL — see the define below).
 * NOTE(review): the enumerators themselves (original lines 192-198) are
 * folded out of this Doxygen listing. */
191enum {
199};
200# define ARRAY_STORE_INDEX_NUM (ARRAY_STORE_INDEX_MSEL + 1)
201
/* File-local global shared by all mesh undo steps: the stride-bucketed array
 * stores (`bs_stride`, used throughout below), a `users` count that keeps the
 * stores alive, the `local_links` list of live UndoMesh steps and — with
 * USE_ARRAY_STORE_THREAD — the background compaction `task_pool`.
 * NOTE(review): member declarations at original lines 203, 206-210 and 213
 * are folded out of this listing. */
202static struct {
204 int users;
205
211
212# ifdef USE_ARRAY_STORE_THREAD
214# endif
215
216} um_arraystore = {{{nullptr}}};
217
/**
 * Move every layer of `cdata` into the shared array-store (when `create` is
 * true, de-duplicating against `bcd_reference`) and strip the array pointers
 * from `cdata` either way. On create, the new per-type state list is returned
 * through `r_bcd_first`.
 *
 * NOTE(review): Doxygen extract — the first signature line (original 218,
 * presumably `static void um_arraystore_cd_compact(CustomData *cdata,`) is
 * folded out of this listing.
 */
219 const size_t data_len,
220 const bool create,
221 const int bs_index,
222 const BArrayCustomData *bcd_reference,
223 BArrayCustomData **r_bcd_first)
224{
225 using namespace blender;
226 if (data_len == 0) {
227 if (create) {
228 *r_bcd_first = nullptr;
229 }
230 }
231
232 const BArrayCustomData *bcd_reference_current = bcd_reference;
233 BArrayCustomData *bcd = nullptr, *bcd_first = nullptr, *bcd_prev = nullptr;
 /* Walk runs of consecutive layers that share the same type. */
234 for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
235 const eCustomDataType type = eCustomDataType(cdata->layers[layer_start].type);
236
237 /* Perform a full copy on dynamic layers.
238 *
239 * Unfortunately we can't compare dynamic layer types as they contain allocated pointers,
240 * which burns CPU cycles looking for duplicate data that doesn't exist.
241 * The array data isn't comparable once copied from the mesh,
242 * this bottlenecks on high poly meshes, see #84114.
243 *
244 * Ideally the data would be expanded into a format that could be de-duplicated effectively,
245 * this would require a flat representation of each dynamic custom-data layer.
246 *
247 * Instead, these non-trivial custom data layer are stored in the undo system using implicit
248 * sharing, to avoid the copy from the undo mesh.
249 */
250 const bool layer_type_is_dynamic = CustomData_layertype_is_dynamic(type);
251
252 layer_end = layer_start + 1;
253 while ((layer_end < cdata->totlayer) && (type == cdata->layers[layer_end].type)) {
254 layer_end++;
255 }
256
257 const int stride = CustomData_sizeof(type);
258 BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride[bs_index],
259 stride,
260 array_chunk_size_calc(stride)) :
261 nullptr;
262 const int layer_len = layer_end - layer_start;
263
264 if (create) {
265 if (bcd_reference_current && (bcd_reference_current->type == type)) {
266 /* common case, the reference is aligned */
267 }
268 else {
269 bcd_reference_current = nullptr;
270
271 /* Do a full lookup when unaligned. */
272 if (bcd_reference) {
273 const BArrayCustomData *bcd_iter = bcd_reference;
274 while (bcd_iter) {
275 if (bcd_iter->type == type) {
276 bcd_reference_current = bcd_iter;
277 break;
278 }
279 bcd_iter = bcd_iter->next;
280 }
281 }
282 }
283 }
284
 /* Allocate the per-type node and append it to the singly linked result list. */
285 if (create) {
286 bcd = MEM_new<BArrayCustomData>(__func__);
287 bcd->next = nullptr;
288 bcd->type = type;
289 bcd->states.reinitialize(layer_end - layer_start);
290
291 if (bcd_prev) {
292 bcd_prev->next = bcd;
293 bcd_prev = bcd;
294 }
295 else {
296 bcd_first = bcd;
297 bcd_prev = bcd;
298 }
299 }
300
301 CustomDataLayer *layer = &cdata->layers[layer_start];
302 for (int i = 0; i < layer_len; i++, layer++) {
303 if (create) {
304 if (layer->data) {
305 if (layer_type_is_dynamic) {
306 /* See comment on `layer_type_is_dynamic` above. */
307 const ImplicitSharingInfo *sharing_info;
308 if (layer->sharing_info) {
309 sharing_info = layer->sharing_info;
310 sharing_info->add_user();
311 }
312 else {
313 sharing_info = implicit_sharing::info_for_mem_free(layer->data);
314 }
315 bcd->states[i] = ImplicitSharingInfoAndData{sharing_info, layer->data};
316 }
317 else {
318 BArrayState *state_reference = nullptr;
319 if (bcd_reference_current && i < bcd_reference_current->states.size()) {
320 state_reference = std::get<BArrayState *>(bcd_reference_current->states[i]);
321 }
322
323 void *data_final = layer->data;
324 size_t data_final_size = size_t(data_len) * stride;
325
326# ifdef USE_ARRAY_STORE_RLE
327 const bool use_rle = um_customdata_layer_use_rle(bcd);
328 uint8_t *data_enc = nullptr;
329 if (use_rle) {
330 /* Store the size in the encoded data (for convenience). */
331 size_t data_enc_extra_size = sizeof(size_t);
332 size_t data_enc_len;
333 data_enc = BLI_array_store_rle_encode(reinterpret_cast<const uint8_t *>(data_final),
334 data_final_size,
335 data_enc_extra_size,
336 &data_enc_len);
337 memcpy(data_enc, &data_final_size, data_enc_extra_size);
338 data_final = data_enc;
339 data_final_size = data_enc_extra_size + data_enc_len;
340 }
341# endif
342
343 bcd->states[i] = {
344 BLI_array_store_state_add(bs, data_final, data_final_size, state_reference),
345 };
346
 /* The store keeps its own copy; the temporary RLE buffer can go. */
347# ifdef USE_ARRAY_STORE_RLE
348 if (use_rle) {
349 MEM_freeN(data_enc);
350 }
351# endif
352 }
353 }
354 else {
355 bcd->states[i] = nullptr;
356 }
357 }
358
 /* Either way, release the mesh's own copy of the layer array. */
359 if (layer->data) {
360 if (layer->sharing_info) {
361 layer->sharing_info->remove_user_and_delete_if_last();
362 layer->sharing_info = nullptr;
363 layer->data = nullptr;
364 }
365 else {
366 MEM_SAFE_FREE(layer->data);
367 }
368 }
369 }
370
371 if (create) {
372 if (bcd_reference_current) {
373 bcd_reference_current = bcd_reference_current->next;
374 }
375 }
376 }
377
378 if (create) {
379 *r_bcd_first = bcd_first;
380 }
381}
382
/**
 * Re-materialize the layer arrays of `cdata` from the states stored in `bcd`
 * (the reverse of `um_arraystore_cd_compact`).
 *
 * NOTE(review): Doxygen extract — folded lines: the first signature line
 * (original 387, presumably `static void um_arraystore_cd_expand(const
 * BArrayCustomData *bcd,`), line 401 (presumably the `data` fetch via
 * `BLI_array_store_state_data_get_alloc(state, &state_len)` — that function is
 * referenced by this file's index), and lines 416/424 — confirm against the
 * real file before editing.
 */
388 CustomData *cdata,
389 const size_t data_len)
390{
391 using namespace blender;
392 CustomDataLayer *layer = cdata->layers;
393 while (bcd) {
394 const int stride = CustomData_sizeof(bcd->type);
395 for (int i = 0; i < bcd->states.size(); i++) {
396 BLI_assert(bcd->type == layer->type);
397 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
398 BArrayState *state = std::get<BArrayState *>(bcd->states[i]);
399 if (state) {
400 size_t state_len;
402
403# ifdef USE_ARRAY_STORE_RLE
404 const bool use_rle = um_customdata_layer_use_rle(bcd);
405 if (use_rle) {
406 /* Store the size in the encoded data (for convenience). */
407 size_t data_enc_extra_size = sizeof(size_t);
408 const uint8_t *data_enc = reinterpret_cast<uint8_t *>(data);
409 size_t data_dec_len;
 /* Decoded size was prepended to the encoded buffer on compact. */
410 memcpy(&data_dec_len, data_enc, sizeof(size_t));
411 uint8_t *data_dec = MEM_malloc_arrayN<uint8_t>(data_dec_len, __func__);
412 BLI_array_store_rle_decode(data_enc + data_enc_extra_size,
413 state_len - data_enc_extra_size,
414 data_dec,
415 data_dec_len);
417 data = static_cast<void *>(data_dec);
418 /* Just for the assert to succeed. */
419 state_len = data_dec_len;
420 }
421# endif
422
423 layer->data = data;
425 BLI_assert(stride * data_len == state_len);
426 UNUSED_VARS_NDEBUG(stride, data_len);
427 }
428 else {
429 layer->data = nullptr;
430 }
431 }
432 else {
 /* Implicit-sharing path used for dynamic layers: re-share, don't copy. */
433 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
434 layer->data = const_cast<void *>(state.data);
435 layer->sharing_info = state.sharing_info;
436 layer->sharing_info->add_user();
437 }
438 layer++;
439 }
440 bcd = bcd->next;
441 }
442}
443
/**
 * Release all states held by the `bcd` list and free the list nodes.
 *
 * NOTE(review): Doxygen extract — original line 454 is folded out here;
 * given the otherwise-unused `bs` lookup and this file's index, it is
 * presumably `BLI_array_store_state_remove(bs, state);` — confirm against the
 * real file.
 */
444static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
445{
446 using namespace blender;
447 while (bcd) {
448 BArrayCustomData *bcd_next = bcd->next;
449 const int stride = CustomData_sizeof(bcd->type);
450 BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[bs_index], stride);
451 for (int i = 0; i < bcd->states.size(); i++) {
452 if (std::holds_alternative<BArrayState *>(bcd->states[i])) {
453 if (BArrayState *state = std::get<BArrayState *>(bcd->states[i])) {
455 }
456 }
457 else {
458 ImplicitSharingInfoAndData state = std::get<ImplicitSharingInfoAndData>(bcd->states[i]);
459 state.sharing_info->remove_user_and_delete_if_last();
460 }
461 }
462 MEM_delete(bcd);
463 bcd = bcd_next;
464 }
465}
466
/**
 * Compact one undo step: move the mesh's arrays (custom-data per domain, face
 * offsets, shape-key blocks, `mselect`) into the shared array-store and strip
 * them from `um->mesh`. With `create == false` no states are added — the
 * arrays are only released (used when re-clearing an expanded step).
 *
 * NOTE(review): Doxygen extract — folded lines: original 483 (the parallel
 * dispatch call that takes the lambdas below, presumably
 * `blender::threading::parallel_invoke(`), 540-541 and 573-574 (the
 * `BLI_array_store_at_size_ensure` calls producing `bs` for the shape-key and
 * `mselect` branches) — confirm against the real file.
 */
472static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
473{
474 Mesh *mesh = um->mesh;
475
476 /* Compacting can be time consuming, run in parallel.
477 *
478 * NOTE(@ideasman42): this could be further parallelized with every custom-data layer
479 * running in its own thread. If this is a bottleneck it's worth considering.
480 * At the moment it seems fast enough to split by domain.
481 * Since this is itself a background thread, using too many threads here could
482 * interfere with foreground tasks. */
484 4096 < (mesh->verts_num + mesh->edges_num + mesh->corners_num + mesh->faces_num),
485 [&]() {
486 um_arraystore_cd_compact(&mesh->vert_data,
487 mesh->verts_num,
488 create,
489 ARRAY_STORE_INDEX_VERT,
490 um_ref ? um_ref->store.vdata : nullptr,
491 &um->store.vdata);
492 },
493 [&]() {
494 um_arraystore_cd_compact(&mesh->edge_data,
495 mesh->edges_num,
496 create,
497 ARRAY_STORE_INDEX_EDGE,
498 um_ref ? um_ref->store.edata : nullptr,
499 &um->store.edata);
500 },
501 [&]() {
502 um_arraystore_cd_compact(&mesh->corner_data,
503 mesh->corners_num,
504 create,
505 ARRAY_STORE_INDEX_LOOP,
506 um_ref ? um_ref->store.ldata : nullptr,
507 &um->store.ldata);
508 },
509 [&]() {
510 um_arraystore_cd_compact(&mesh->face_data,
511 mesh->faces_num,
512 create,
513 ARRAY_STORE_INDEX_POLY,
514 um_ref ? um_ref->store.pdata : nullptr,
515 &um->store.pdata);
516 },
517 [&]() {
518 if (mesh->face_offset_indices) {
519 BLI_assert(create == (um->store.face_offset_indices == nullptr));
520 if (create) {
521 BArrayState *state_reference = um_ref ? um_ref->store.face_offset_indices : nullptr;
522 const size_t stride = sizeof(*mesh->face_offset_indices);
523 BArrayStore *bs = BLI_array_store_at_size_ensure(
524 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_POLY_OFFSETS],
525 stride,
526 array_chunk_size_calc(stride));
527 um->store.face_offset_indices = BLI_array_store_state_add(bs,
528 mesh->face_offset_indices,
529 size_t(mesh->faces_num + 1) *
530 stride,
531 state_reference);
532 }
533 blender::implicit_sharing::free_shared_data(&mesh->face_offset_indices,
534 &mesh->runtime->face_offsets_sharing_info);
535 }
536 },
537 [&]() {
538 if (mesh->key && mesh->key->totkey) {
539 const size_t stride = mesh->key->elemsize;
542 stride,
543 array_chunk_size_calc(stride)) :
544 nullptr;
545 if (create) {
546 um->store.keyblocks = static_cast<BArrayState **>(
547 MEM_mallocN(mesh->key->totkey * sizeof(*um->store.keyblocks), __func__));
548 }
549 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
550 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
551 if (create) {
 /* De-duplicate against the matching key-block of the reference step, if any. */
552 BArrayState *state_reference = (um_ref && um_ref->mesh->key &&
553 (i < um_ref->mesh->key->totkey)) ?
554 um_ref->store.keyblocks[i] :
555 nullptr;
556 um->store.keyblocks[i] = BLI_array_store_state_add(
557 bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
558 }
559
560 if (keyblock->data) {
561 MEM_freeN(keyblock->data);
562 keyblock->data = nullptr;
563 }
564 }
565 }
566 },
567 [&]() {
568 if (mesh->mselect && mesh->totselect) {
569 BLI_assert(create == (um->store.mselect == nullptr));
570 if (create) {
571 BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
572 const size_t stride = sizeof(*mesh->mselect);
575 stride,
576 array_chunk_size_calc(stride));
577 um->store.mselect = BLI_array_store_state_add(
578 bs, mesh->mselect, size_t(mesh->totselect) * stride, state_reference);
579 }
580
581 /* keep mesh->totselect for validation */
582 MEM_freeN(mesh->mselect);
583 mesh->mselect = nullptr;
584 }
585 });
586
587 if (create) {
588 um_arraystore.users += 1;
589 }
590}
591
595static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
596{
597 um_arraystore_compact_ex(um, um_ref, true);
598}
599
/**
 * Compact `um` (see `um_arraystore_compact`) and, when the DEBUG_PRINT /
 * DEBUG_TIME defines are enabled, report memory-usage and timing statistics.
 *
 * NOTE(review): Doxygen extract — original lines 607 and 630 (the opening
 * lines of the two statistics calls, presumably
 * `BLI_array_store_at_size_calc_memory_usage(` per this file's index) are
 * folded out of this listing.
 */
600static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
601{
602# ifdef DEBUG_PRINT
603 size_t size_expanded_prev = 0, size_compacted_prev = 0;
604
605 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
606 size_t size_expanded_prev_iter, size_compacted_prev_iter;
608 &um_arraystore.bs_stride[bs_index], &size_expanded_prev_iter, &size_compacted_prev_iter);
609 size_expanded_prev += size_expanded_prev_iter;
610 size_compacted_prev += size_compacted_prev_iter;
611 }
612# endif
613
614# ifdef DEBUG_TIME
615 TIMEIT_START(mesh_undo_compact);
616# endif
617
618 um_arraystore_compact(um, um_ref);
619
620# ifdef DEBUG_TIME
621 TIMEIT_END(mesh_undo_compact);
622# endif
623
624# ifdef DEBUG_PRINT
625 {
626 size_t size_expanded = 0, size_compacted = 0;
627
628 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
629 size_t size_expanded_iter, size_compacted_iter;
631 &um_arraystore.bs_stride[bs_index], &size_expanded_iter, &size_compacted_iter);
632 size_expanded += size_expanded_iter;
633 size_compacted += size_compacted_iter;
634 }
635
 /* -1.0 signals "no data" so the percentages below stay meaningful. */
636 const double percent_total = size_expanded ?
637 ((double(size_compacted) / double(size_expanded)) * 100.0) :
638 -1.0;
639
640 size_t size_expanded_step = size_expanded - size_expanded_prev;
641 size_t size_compacted_step = size_compacted - size_compacted_prev;
642 const double percent_step = size_expanded_step ?
643 ((double(size_compacted_step) / double(size_expanded_step)) *
644 100.0) :
645 -1.0;
646
647 printf("overall memory use: %.8f%% of expanded size\n", percent_total);
648 printf("step memory use: %.8f%% of expanded size\n", percent_step);
649 }
650# endif
651}
652
653# ifdef USE_ARRAY_STORE_THREAD
654
657 const UndoMesh *um_ref; /* can be nullptr */
658};
659static void um_arraystore_compact_cb(TaskPool *__restrict /*pool*/, void *taskdata)
660{
661 UMArrayData *um_data = static_cast<UMArrayData *>(taskdata);
662 um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
663}
664
665# endif /* USE_ARRAY_STORE_THREAD */
666
/* Strip the (previously expanded) arrays from `um->mesh` again without adding
 * any new array-store states (`create = false`, no reference step).
 * NOTE(review): Doxygen extract — the signature (original line 670) is folded
 * out of this listing; from context this is the `expand_clear` counterpart of
 * `um_arraystore_expand` below. */
671{
672 um_arraystore_compact_ex(um, nullptr, false);
673}
674
/* Re-materialize all of `um->mesh`'s arrays from the array-store states
 * (custom-data, shape-key blocks, face offsets, `mselect`). Per this file's
 * index the signature is `static void um_arraystore_expand(UndoMesh *um)`
 * (original line 675, folded out of this Doxygen listing — as are lines
 * 679-682 (per-domain `um_arraystore_cd_expand` calls, presumably), 688, 698,
 * 701, 709 and 712 (state lookups / `BLI_array_store_state_data_get_alloc`
 * calls feeding the assignments below)). */
676{
677 Mesh *mesh = um->mesh;
678
683
684 if (um->store.keyblocks) {
685 const size_t stride = mesh->key->elemsize;
686 KeyBlock *keyblock = static_cast<KeyBlock *>(mesh->key->block.first);
687 for (int i = 0; i < mesh->key->totkey; i++, keyblock = keyblock->next) {
689 size_t state_len;
690 keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
691 BLI_assert(keyblock->totelem == (state_len / stride));
692 UNUSED_VARS_NDEBUG(stride);
693 }
694 }
695
696 if (um->store.face_offset_indices) {
697 const size_t stride = sizeof(*mesh->face_offset_indices);
699 size_t state_len;
700 mesh->face_offset_indices = static_cast<int *>(
 /* Freshly allocated, so the mesh owns (and later frees) the offsets. */
702 mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
703 mesh->face_offset_indices);
704 BLI_assert((mesh->faces_num + 1) == (state_len / stride));
705 UNUSED_VARS_NDEBUG(stride);
706 }
707 if (um->store.mselect) {
708 const size_t stride = sizeof(*mesh->mselect);
710 size_t state_len;
711 mesh->mselect = static_cast<MSelect *>(
713 BLI_assert(mesh->totselect == (state_len / stride));
714 UNUSED_VARS_NDEBUG(stride);
715 }
716}
717
/* Release every array-store state held by `um` and drop a user from the
 * global store; when the last user is gone, free the stores (and the task
 * pool). Per this file's index the signature is
 * `static void um_arraystore_free(UndoMesh *um)` (original line 718, folded
 * out of this Doxygen listing — as are lines 722-725 (per-domain
 * `um_arraystore_cd_free` calls, presumably), 729, 732-733, 735, 741-744,
 * 749, 751-752 (the state-remove calls for key-blocks / face offsets /
 * `mselect`) and 768 (presumably `BLI_task_pool_free(...)` given the
 * assignment that follows)). */
719{
720 Mesh *mesh = um->mesh;
721
726
727 if (um->store.keyblocks) {
728 const size_t stride = mesh->key->elemsize;
730 &um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE], stride);
731 for (int i = 0; i < mesh->key->totkey; i++) {
734 }
736 um->store.keyblocks = nullptr;
737 }
738
739 if (um->store.face_offset_indices) {
740 const size_t stride = sizeof(*mesh->face_offset_indices);
745 um->store.face_offset_indices = nullptr;
746 }
747 if (um->store.mselect) {
748 const size_t stride = sizeof(*mesh->mselect);
750 stride);
753 um->store.mselect = nullptr;
754 }
755
756 um_arraystore.users -= 1;
757
758 BLI_assert(um_arraystore.users >= 0);
759
760 if (um_arraystore.users == 0) {
761# ifdef DEBUG_PRINT
762 printf("mesh undo store: freeing all data!\n");
763# endif
764 for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
765 BLI_array_store_at_size_clear(&um_arraystore.bs_stride[bs_index]);
766 }
767# ifdef USE_ARRAY_STORE_THREAD
769 um_arraystore.task_pool = nullptr;
770# endif
771 }
772}
773
775
776/* -------------------------------------------------------------------- */
779
/* For each edit-object, find the most recent previous UndoMesh for the same
 * mesh (matched by `id.session_uid`) so the new step can de-duplicate against
 * it. Returns a heap array parallel to `object` (entries may be null), or
 * null when no reference was found for any object — caller frees.
 * NOTE(review): Doxygen extract — the signature (original lines up to 791,
 * presumably `static UndoMesh **mesh_undostep_reference_elems_from_objects(
 * Object **object, const int object_len)` given the call at the encode step)
 * is folded out of this listing. */
792{
793 /* Map: `Mesh.id.session_uid` -> `UndoMesh`. */
794 GHash *uuid_map = BLI_ghash_ptr_new_ex(__func__, object_len);
795 UndoMesh **um_references = MEM_calloc_arrayN<UndoMesh *>(object_len, __func__);
796 for (int i = 0; i < object_len; i++) {
797 const Mesh *mesh = static_cast<const Mesh *>(object[i]->data);
798 BLI_ghash_insert(uuid_map, POINTER_FROM_INT(mesh->id.session_uid), &um_references[i]);
799 }
800 int uuid_map_len = object_len;
801
802 /* Loop backwards over all previous mesh undo data until either:
803 * - All elements have been found (where `um_references` we'll have every element set).
804 * - There are no undo steps left to look for. */
805 UndoMesh *um_iter = static_cast<UndoMesh *>(um_arraystore.local_links.last);
806 while (um_iter && (uuid_map_len != 0)) {
807 UndoMesh **um_p;
808 if ((um_p = static_cast<UndoMesh **>(BLI_ghash_popkey(
809 uuid_map, POINTER_FROM_INT(um_iter->mesh->id.session_uid), nullptr))))
810 {
811 *um_p = um_iter;
812 uuid_map_len--;
813 }
814 um_iter = um_iter->local_prev;
815 }
816 BLI_assert(uuid_map_len == BLI_ghash_len(uuid_map));
817 BLI_ghash_free(uuid_map, nullptr, nullptr);
 /* Nothing matched at all: signal "no references" with a null result. */
818 if (uuid_map_len == object_len) {
819 MEM_freeN(um_references);
820 um_references = nullptr;
821 }
822 return um_references;
823}
824
826
827#endif /* USE_ARRAY_STORE */
828
829/* for callbacks */
830/* undo simply makes copies of a bmesh */
/**
 * Capture the current edit-mesh state into `um` (a Mesh copy plus shape-key
 * duplicate and vertex-group list) and queue/perform array-store compaction.
 * Per this file's index the signature is `static void *undomesh_from_editmesh(
 * UndoMesh *um, BMEditMesh *em, Key *key, const ListBase *vertex_group_names,
 * const int vertex_group_active_index, UndoMesh *um_ref)` (its first line,
 * original 839, is folded out of this Doxygen listing — as are lines 846,
 * 850 (the task-pool wait, presumably `BLI_task_pool_work_and_wait`), 854
 * (the Mesh allocation/init), 870 (the `BMeshToMeshParams params{}`
 * declaration the assignments below fill in), 890 (task-pool creation) and
 * 899 (the non-threaded compaction call)).
 */
840 BMEditMesh *em,
841 Key *key,
842 const ListBase *vertex_group_names,
843 const int vertex_group_active_index,
844 UndoMesh *um_ref)
845{
847#ifdef USE_ARRAY_STORE_THREAD
848 /* changes this waits is low, but must have finished */
849 if (um_arraystore.task_pool) {
851 }
852#endif
853
855
856 /* make sure shape keys work */
857 if (key != nullptr) {
858 um->mesh->key = (Key *)BKE_id_copy_ex(
859 nullptr, &key->id, nullptr, LIB_ID_COPY_LOCALIZE | LIB_ID_COPY_NO_ANIMDATA);
860 }
861 else {
862 um->mesh->key = nullptr;
863 }
864
865 /* Uncomment for troubleshooting. */
866 // BM_mesh_validate(em->bm);
867
868 CustomData_MeshMasks cd_mask_extra{};
869 cd_mask_extra.vmask = CD_MASK_SHAPE_KEYINDEX;
871 /* Undo code should not be manipulating 'G_MAIN->object' hooks/vertex-parent. */
872 params.calc_object_remap = false;
873 params.update_shapekey_indices = false;
874 params.cd_mask_extra = cd_mask_extra;
875 params.active_shapekey_to_mvert = true;
876 BM_mesh_bm_to_me(nullptr, em->bm, um->mesh, &params);
877 BKE_defgroup_copy_list(&um->mesh->vertex_group_names, vertex_group_names);
878 um->mesh->vertex_group_active_index = vertex_group_active_index;
879
880 um->selectmode = em->selectmode;
881 um->shapenr = em->bm->shapenr;
882
883#ifdef USE_ARRAY_STORE
884 {
885 /* Add ourselves. */
886 BLI_addtail(&um_arraystore.local_links, um);
887
888# ifdef USE_ARRAY_STORE_THREAD
889 if (um_arraystore.task_pool == nullptr) {
891 }
892
 /* Hand ownership of the task data to the pool (`free_taskdata = true`). */
893 UMArrayData *um_data = MEM_mallocN<UMArrayData>(__func__);
894 um_data->um = um;
895 um_data->um_ref = um_ref;
896
897 BLI_task_pool_push(um_arraystore.task_pool, um_arraystore_compact_cb, um_data, true, nullptr);
898# else
900# endif
901 }
902#else
903 UNUSED_VARS(um_ref);
904#endif
905
906 return um;
907}
908
/**
 * Restore an undo state: expand `um`'s arrays back into its mesh, rebuild a
 * fresh BMesh/BMEditMesh from it in-place over `em`, and restore vertex-group
 * names plus selection/shape-key settings. Per this file's index the
 * signature is `static void undomesh_to_editmesh(UndoMesh *um, BMEditMesh *em,
 * ListBase *vertex_group_names, int *vertex_group_active_index)` (its first
 * line, original 917, is folded out of this Doxygen listing — as are lines
 * 928 (the task-pool wait), 935 (the `um_arraystore_expand` call, from the
 * DEBUG_TIME markers around it), 946 (presumably freeing the previous
 * edit-mesh data), 966 (tessellation/normals, presumably
 * `BKE_editmesh_looptris_and_normals_calc` per the comment above it) and 976
 * (the re-clear of the expanded arrays)).
 */
918 BMEditMesh *em,
919 ListBase *vertex_group_names,
920 int *vertex_group_active_index)
921{
922 BMEditMesh *em_tmp;
923 BMesh *bm;
924
925#ifdef USE_ARRAY_STORE
926# ifdef USE_ARRAY_STORE_THREAD
927 /* changes this waits is low, but must have finished */
929# endif
930
931# ifdef DEBUG_TIME
932 TIMEIT_START(mesh_undo_expand);
933# endif
934
936
937# ifdef DEBUG_TIME
938 TIMEIT_END(mesh_undo_expand);
939# endif
940#endif /* USE_ARRAY_STORE */
941
942 const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(um->mesh);
943
944 em->bm->shapenr = um->shapenr;
945
947
948 BMeshCreateParams create_params{};
949 create_params.use_toolflags = true;
950 bm = BM_mesh_create(&allocsize, &create_params);
951
952 BMeshFromMeshParams convert_params{};
953 /* Handled with tessellation. */
954 convert_params.calc_face_normal = false;
955 convert_params.calc_vert_normal = false;
956 convert_params.active_shapekey = um->shapenr;
957 BM_mesh_bm_from_me(bm, um->mesh, &convert_params);
958 BLI_freelistN(vertex_group_names);
959 BKE_defgroup_copy_list(vertex_group_names, &um->mesh->vertex_group_names);
960 *vertex_group_active_index = um->mesh->vertex_group_active_index;
961
 /* Shallow-copy the freshly created edit-mesh over the caller's `em`. */
962 em_tmp = BKE_editmesh_create(bm);
963 *em = *em_tmp;
964
965 /* Calculate face normals and tessellation at once since it's multi-threaded. */
967
968 em->selectmode = um->selectmode;
969 bm->selectmode = um->selectmode;
970
971 bm->spacearr_dirty = BM_SPACEARR_DIRTY_ALL;
972
973 MEM_delete(em_tmp);
974
975#ifdef USE_ARRAY_STORE
977#endif
978}
979
/* Free one undo element: expand the mesh first (so custom-data allocations
 * are owned by — and freed with — the mesh), release its array-store states,
 * then free the shape-key and mesh IDs. Per this file's index the signature
 * is `static void undomesh_free_data(UndoMesh *um)` (original line 980,
 * folded out of this Doxygen listing — as are lines 988 (the task-pool wait),
 * 992 (the expand call described by the comment below it) and 997 (the
 * `um_arraystore_free` call, presumably)). */
981{
982 Mesh *mesh = um->mesh;
983
984#ifdef USE_ARRAY_STORE
985
986# ifdef USE_ARRAY_STORE_THREAD
987 /* Chances this waits is low, but must have finished. */
989# endif
990
991 /* We need to expand so any allocations in custom-data are freed with the mesh. */
993
994 BLI_assert(BLI_findindex(&um_arraystore.local_links, um) != -1);
995 BLI_remlink(&um_arraystore.local_links, um);
996
998#endif
999
1000 if (mesh->key) {
1001 BKE_id_free(nullptr, mesh->key);
1002 mesh->key = nullptr;
1003 }
1004
1005 BKE_id_free(nullptr, mesh);
1006 um->mesh = nullptr;
1007}
1008
1010{
1011 Scene *scene = CTX_data_scene(C);
1012 ViewLayer *view_layer = CTX_data_view_layer(C);
1013 BKE_view_layer_synced_ensure(scene, view_layer);
1014 Object *obedit = BKE_view_layer_edit_object_get(view_layer);
1015 if (obedit && obedit->type == OB_MESH) {
1016 const Mesh *mesh = static_cast<Mesh *>(obedit->data);
1017 if (mesh->runtime->edit_mesh != nullptr) {
1018 return obedit;
1019 }
1020 }
1021 return nullptr;
1022}
1023
1025
1026/* -------------------------------------------------------------------- */
1031
/* Per-object element of a mesh undo step: a reference to the edit object plus
 * its captured UndoMesh data.
 * NOTE(review): Doxygen extract — the struct's opening line (original 1032)
 * and the `UndoMesh data` member (original 1034, inferred from the
 * `elem->data` accesses below) are folded out of this listing. */
1033 UndoRefID_Object obedit_ref;
1035};
1036
1044
/* Undo-system poll: this step type applies only while a mesh edit-object is
 * active. NOTE(review): the signature (original line 1045) is folded out of
 * this Doxygen listing. */
1046{
1047 return editmesh_object_from_context(C) != nullptr;
1048}
1049
/* Encode callback: snapshot every mesh object currently in edit-mode into the
 * step's element array, reusing previous steps' data as de-duplication
 * references where possible.
 * NOTE(review): Doxygen extract — folded lines: the signature (original
 * 1050), 1059 (the `objects` vector initialization, presumably
 * `ED_undo_editmode_objects_from_view_layer(scene, view_layer)` per this
 * file's index), 1078 and 1082 (the `undomesh_from_editmesh(...)` call lines
 * surrounding the visible argument lines) and 1090. */
1051{
1052 MeshUndoStep *us = (MeshUndoStep *)us_p;
1053
1054 /* Important not to use the 3D view when getting objects because all objects
1055 * outside of this list will be moved out of edit-mode when reading back undo steps. */
1056 Scene *scene = CTX_data_scene(C);
1057 ViewLayer *view_layer = CTX_data_view_layer(C);
1058 const ToolSettings *ts = scene->toolsettings;
1060
1061 us->scene_ref.ptr = scene;
1062 us->elems = MEM_calloc_arrayN<MeshUndoStep_Elem>(objects.size(), __func__);
1063 us->elems_len = objects.size();
1064
1065 UndoMesh **um_references = nullptr;
1066
1067#ifdef USE_ARRAY_STORE
1068 um_references = mesh_undostep_reference_elems_from_objects(objects.data(), objects.size());
1069#endif
1070
1071 for (uint i = 0; i < objects.size(); i++) {
1072 Object *obedit = objects[i];
1073 MeshUndoStep_Elem *elem = &us->elems[i];
1074
1075 elem->obedit_ref.ptr = obedit;
1076 Mesh *mesh = static_cast<Mesh *>(elem->obedit_ref.ptr->data);
1077 BMEditMesh *em = mesh->runtime->edit_mesh.get();
1079 em,
1080 mesh->key,
1081 &mesh->vertex_group_names,
1083 um_references ? um_references[i] : nullptr);
1084
1085 em->needs_flush_to_id = 1;
1086 us->step.data_size += elem->data.undo_size;
1087 elem->data.uv_selectmode = ts->uv_selectmode;
1088
 /* Keep the session UID so later steps can match this mesh as a reference. */
1089#ifdef USE_ARRAY_STORE
1091 elem->data.mesh->id.session_uid = mesh->id.session_uid;
1092#endif
1093 }
1094
1095 if (um_references != nullptr) {
1096 MEM_freeN(um_references);
1097 }
1098
1099 bmain->is_memfile_undo_flush_needed = true;
1100
1101 return true;
1102}
1103
/* Decode callback: restore every element's edit-mesh from its stored data,
 * re-activate the first object and tag updates/notifiers.
 * NOTE(review): Doxygen extract — folded lines: the signature's first line
 * (original 1104), 1111 and 1113 (the opening lines of the two `ED_undo_*`
 * validation/restore-helper calls whose argument lines are visible below),
 * 1116, 1131 (opening line of the `undomesh_to_editmesh(...)` call), 1137,
 * 1140 (presumably the `DEG_id_tag_update` described by the comment above
 * it), 1144 (opening line of `ED_undo_object_set_active_or_warn(...)`), 1148,
 * 1151 (presumably the `WM_main_add_notifier`/toolsettings update) and 1155. */
1105 bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir /*dir*/, bool /*is_final*/)
1106{
1107 MeshUndoStep *us = (MeshUndoStep *)us_p;
1108 Scene *scene = CTX_data_scene(C);
1109 ViewLayer *view_layer = CTX_data_view_layer(C);
1110
1112 CTX_wm_manager(C), us->scene_ref.ptr, &scene, &view_layer);
1114 scene, view_layer, &us->elems[0].obedit_ref.ptr, us->elems_len, sizeof(*us->elems));
1115
1117
1118 for (uint i = 0; i < us->elems_len; i++) {
1119 MeshUndoStep_Elem *elem = &us->elems[i];
1120 Object *obedit = elem->obedit_ref.ptr;
1121 Mesh *mesh = static_cast<Mesh *>(obedit->data);
1122 if (mesh->runtime->edit_mesh == nullptr) {
1123 /* Should never fail, may not crash but can give odd behavior. */
1124 CLOG_ERROR(&LOG,
1125 "name='%s', failed to enter edit-mode for object '%s', undo state invalid",
1126 us_p->name,
1127 obedit->id.name);
1128 continue;
1129 }
1130 BMEditMesh *em = mesh->runtime->edit_mesh.get();
1132 &elem->data, em, &mesh->vertex_group_names, &mesh->vertex_group_active_index);
1133
1134 obedit->shapenr = em->bm->shapenr;
1135
1136 em->needs_flush_to_id = 1;
1138 /* The object update tag is necessary to cause modifiers to reevaluate after vertex group
1139 * changes. */
1141 }
1142
1143 /* The first element is always active */
1145 scene, view_layer, us->elems[0].obedit_ref.ptr, us_p->name, &LOG);
1146
1147 /* Check after setting active (unless undoing into another scene). */
1149
1150 scene->toolsettings->selectmode = us->elems[0].data.selectmode;
1152
1153 bmain->is_memfile_undo_flush_needed = true;
1154
1156}
1157
/* Free callback: release every element's mesh data, then the element array
 * itself. NOTE(review): the signature (original line 1158) is folded out of
 * this Doxygen listing. */
1159{
1160 MeshUndoStep *us = (MeshUndoStep *)us_p;
1161
1162 for (uint i = 0; i < us->elems_len; i++) {
1163 MeshUndoStep_Elem *elem = &us->elems[i];
1164 undomesh_free_data(&elem->data);
1165 }
1166 MEM_freeN(us->elems);
1167}
1168
/* Report every ID reference held by this step (the scene plus each element's
 * edit object) so the undo system can remap or invalidate them.
 * NOTE(review): the signature's first line (original 1169, carrying the
 * `UndoStep *us_p` parameter used below) is folded out of this Doxygen
 * listing. */
1170 UndoTypeForEachIDRefFn foreach_ID_ref_fn,
1171 void *user_data)
1172{
1173 MeshUndoStep *us = (MeshUndoStep *)us_p;
1174
1175 foreach_ID_ref_fn(user_data, ((UndoRefID *)&us->scene_ref));
1176 for (uint i = 0; i < us->elems_len; i++) {
1177 MeshUndoStep_Elem *elem = &us->elems[i];
1178 foreach_ID_ref_fn(user_data, ((UndoRefID *)&elem->obedit_ref));
1179 }
1180}
1181
1196
Scene * CTX_data_scene(const bContext *C)
wmWindowManager * CTX_wm_manager(const bContext *C)
ViewLayer * CTX_data_view_layer(const bContext *C)
CustomData interface, see also DNA_customdata_types.h.
int CustomData_sizeof(eCustomDataType type)
bool CustomData_layertype_is_dynamic(eCustomDataType type)
support for deformation groups and hooks.
void BKE_defgroup_copy_list(ListBase *outbase, const ListBase *inbase)
Definition deform.cc:71
BMEditMesh * BKE_editmesh_create(BMesh *bm)
Definition editmesh.cc:32
void BKE_editmesh_looptris_and_normals_calc(BMEditMesh *em)
Definition editmesh.cc:95
void BKE_view_layer_synced_ensure(const Scene *scene, ViewLayer *view_layer)
Object * BKE_view_layer_edit_object_get(const ViewLayer *view_layer)
void BKE_id_free(Main *bmain, void *idv)
@ LIB_ID_COPY_LOCALIZE
@ LIB_ID_COPY_NO_ANIMDATA
ID * BKE_id_copy_ex(Main *bmain, const ID *id, ID **new_id_p, int flag)
Definition lib_id.cc:767
General operations, lookup, etc. for blender objects.
bool BKE_object_is_in_editmode(const Object *ob)
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
void(*)(void *user_data, UndoRefID *id_ref) UndoTypeForEachIDRefFn
eUndoStepDir
Efficient in-memory storage of multiple similar arrays.
BArrayState * BLI_array_store_state_add(BArrayStore *bs, const void *data, size_t data_len, const BArrayState *state_reference)
void BLI_array_store_state_remove(BArrayStore *bs, BArrayState *state)
void * BLI_array_store_state_data_get_alloc(const BArrayState *state, size_t *r_data_len)
void BLI_array_store_rle_decode(const uint8_t *data_enc, const size_t data_enc_len, void *data_dec_v, const size_t data_dec_len)
uint8_t * BLI_array_store_rle_encode(const uint8_t *data_dec, size_t data_dec_len, size_t data_enc_extra_size, size_t *r_data_enc_len)
struct BArrayStore * BLI_array_store_at_size_ensure(struct BArrayStore_AtSize *bs_stride, int stride, int chunk_size)
void BLI_array_store_at_size_clear(struct BArrayStore_AtSize *bs_stride)
void BLI_array_store_at_size_calc_memory_usage(const struct BArrayStore_AtSize *bs_stride, size_t *r_size_expanded, size_t *r_size_compacted)
struct BArrayStore * BLI_array_store_at_size_get(struct BArrayStore_AtSize *bs_stride, int stride)
Generic array manipulation API.
#define BLI_array_is_zeroed(arr, arr_len)
#define BLI_assert(a)
Definition BLI_assert.h:46
void * BLI_ghash_popkey(GHash *gh, const void *key, GHashKeyFreeFP keyfreefp) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.cc:802
GHash * BLI_ghash_ptr_new_ex(const char *info, unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
unsigned int BLI_ghash_len(const GHash *gh) ATTR_WARN_UNUSED_RESULT
Definition BLI_ghash.cc:702
void BLI_ghash_insert(GHash *gh, void *key, void *val)
Definition BLI_ghash.cc:707
void BLI_ghash_free(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp)
Definition BLI_ghash.cc:860
int BLI_findindex(const ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:586
void void BLI_freelistN(ListBase *listbase) ATTR_NONNULL(1)
Definition listbase.cc:497
void BLI_addtail(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:111
void BLI_remlink(ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition listbase.cc:131
MINLINE int power_of_2_max_i(int n)
unsigned int uint
@ TASK_PRIORITY_LOW
Definition BLI_task.h:52
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition task_pool.cc:531
TaskPool * BLI_task_pool_create_background(void *userdata, eTaskPriority priority)
Definition task_pool.cc:485
void BLI_task_pool_free(TaskPool *pool)
Definition task_pool.cc:517
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition task_pool.cc:522
Utility defines for timing/benchmarks.
#define TIMEIT_START(var)
#define TIMEIT_END(var)
#define UNUSED_VARS(...)
#define POINTER_FROM_INT(i)
#define UNUSED_VARS_NDEBUG(...)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:182
void DEG_id_tag_update(ID *id, unsigned int flags)
@ ID_RECALC_GEOMETRY
Definition DNA_ID.h:982
Object is a sort of wrapper for general info.
@ OB_MESH
void EDBM_mesh_free_data(BMEditMesh *em)
void ED_undo_object_set_active_or_warn(Scene *scene, ViewLayer *view_layer, Object *ob, const char *info, CLG_LogRef *log)
Definition ed_undo.cc:774
void ED_undo_object_editmode_restore_helper(Scene *scene, ViewLayer *view_layer, Object **object_array, uint object_array_len, uint object_array_stride)
Definition ed_undo.cc:810
blender::Vector< Object * > ED_undo_editmode_objects_from_view_layer(const Scene *scene, ViewLayer *view_layer)
Definition ed_undo.cc:855
void ED_undo_object_editmode_validate_scene_from_windows(wmWindowManager *wm, const Scene *scene_ref, Scene **scene_p, ViewLayer **view_layer_p)
Definition ed_undo.cc:793
Read Guarded memory(de)allocation.
#define C
Definition RandGen.cpp:29
#define NC_GEOM
Definition WM_types.hh:390
#define ND_DATA
Definition WM_types.hh:506
@ BM_SPACEARR_DIRTY_ALL
BMesh const char void * data
BMesh * bm
BMesh * BM_mesh_create(const BMAllocTemplate *allocsize, const BMeshCreateParams *params)
BMesh Make Mesh.
#define BMALLOC_TEMPLATE_FROM_ME(...)
void BM_mesh_bm_from_me(BMesh *bm, const Mesh *mesh, const BMeshFromMeshParams *params)
void BM_mesh_bm_to_me(Main *bmain, BMesh *bm, Mesh *mesh, const BMeshToMeshParams *params)
int64_t size() const
Definition BLI_array.hh:245
void reinitialize(const int64_t new_size)
Definition BLI_array.hh:398
int64_t size() const
ListBase local_links
int users
BArrayStore_AtSize bs_stride
static Object * editmesh_object_from_context(bContext *C)
static void um_arraystore_expand(UndoMesh *um)
static void um_arraystore_compact_cb(TaskPool *__restrict, void *taskdata)
static void um_arraystore_free(UndoMesh *um)
static void undomesh_free_data(UndoMesh *um)
static void * undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, const ListBase *vertex_group_names, const int vertex_group_active_index, UndoMesh *um_ref)
static void undomesh_to_editmesh(UndoMesh *um, BMEditMesh *em, ListBase *vertex_group_names, int *vertex_group_active_index)
static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
#define ARRAY_CHUNK_NUM_MIN
static bool mesh_undosys_step_encode(bContext *C, Main *bmain, UndoStep *us_p)
#define ARRAY_CHUNK_SIZE_IN_BYTES
void ED_mesh_undosys_type(UndoType *ut)
static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
static struct @060174143312344215311261256220167117024134050205 um_arraystore
#define ARRAY_STORE_INDEX_NUM
static size_t array_chunk_size_calc(const size_t stride)
TaskPool * task_pool
static void um_arraystore_expand_clear(UndoMesh *um)
static void um_arraystore_cd_expand(const BArrayCustomData *bcd, CustomData *cdata, const size_t data_len)
@ ARRAY_STORE_INDEX_LOOP
@ ARRAY_STORE_INDEX_VERT
@ ARRAY_STORE_INDEX_POLY_OFFSETS
@ ARRAY_STORE_INDEX_SHAPE
@ ARRAY_STORE_INDEX_MSEL
@ ARRAY_STORE_INDEX_EDGE
@ ARRAY_STORE_INDEX_POLY
static bool mesh_undosys_poll(bContext *C)
static void mesh_undosys_foreach_ID_ref(UndoStep *us_p, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
static bool um_customdata_layer_use_rle(const BArrayCustomData *bcd)
static void mesh_undosys_step_free(UndoStep *us_p)
static void mesh_undosys_step_decode(bContext *C, Main *bmain, UndoStep *us_p, const eUndoStepDir, bool)
static UndoMesh ** mesh_undostep_reference_elems_from_objects(Object **object, int object_len)
static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
static void um_arraystore_cd_compact(CustomData *cdata, const size_t data_len, const bool create, const int bs_index, const BArrayCustomData *bcd_reference, BArrayCustomData **r_bcd_first)
static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool create)
#define printf(...)
#define MEM_SAFE_FREE(v)
#define CD_MASK_SHAPE_KEYINDEX
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
#define LOG(severity)
Definition log.h:32
void * MEM_mallocN(size_t len, const char *str)
Definition mallocn.cc:128
void * MEM_calloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:123
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
void MEM_freeN(void *vmemh)
Definition mallocn.cc:113
static ulong state[N]
std::unique_ptr< IDProperty, IDPropertyDeleter > create(StringRef prop_name, int32_t value, eIDPropertyFlag flags={})
Allocate a new IDProperty of type IDP_INT, set its name and value.
Mesh * mesh_new_no_attributes(int verts_num, int edges_num, int faces_num, int corners_num)
const ImplicitSharingInfo * info_for_mem_free(void *data)
void parallel_invoke(Functions &&...functions)
Definition BLI_task.hh:221
BArrayCustomData * next
blender::Array< std::variant< BArrayState *, blender::ImplicitSharingInfoAndData > > states
eCustomDataType type
short selectmode
char needs_flush_to_id
int shapenr
const ImplicitSharingInfoHandle * sharing_info
CustomDataLayer * layers
char name[66]
Definition DNA_ID.h:415
unsigned int session_uid
Definition DNA_ID.h:444
struct KeyBlock * next
void * data
int totkey
int elemsize
ListBase block
void * first
bool is_memfile_undo_flush_needed
Definition BKE_main.hh:185
UndoRefID_Object obedit_ref
MeshUndoStep_Elem * elems
UndoRefID_Scene scene_ref
int corners_num
CustomData edge_data
int edges_num
MeshRuntimeHandle * runtime
CustomData corner_data
CustomData face_data
ListBase vertex_group_names
int * face_offset_indices
CustomData vert_data
int vertex_group_active_index
struct Key * key
int totselect
int faces_num
struct MSelect * mselect
int verts_num
struct ToolSettings * toolsettings
const UndoMesh * um_ref
UndoMesh * um
size_t undo_size
UndoMesh * local_prev
BArrayState * mselect
BArrayCustomData * ldata
BArrayState * face_offset_indices
BArrayCustomData * vdata
BArrayState ** keyblocks
UndoMesh * local_next
BArrayCustomData * pdata
struct UndoMesh::@241132136031332173343267200163210323165327054204 store
char uv_selectmode
BArrayCustomData * edata
size_t data_size
char name[64]
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
bool(* poll)(struct bContext *C)
void(* step_decode)(bContext *C, Main *bmain, UndoStep *us, eUndoStepDir dir, bool is_final)
bool(* step_encode)(bContext *C, Main *bmain, UndoStep *us)
i
Definition text_draw.cc:230
void WM_event_add_notifier(const bContext *C, uint type, void *reference)