Blender  V2.93
undo_system.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16 
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "CLG_log.h"
27 
28 #include "BLI_listbase.h"
29 #include "BLI_string.h"
30 #include "BLI_sys_types.h"
31 #include "BLI_utildefines.h"
32 
33 #include "BLT_translation.h"
34 
35 #include "DNA_listBase.h"
37 
38 #include "BKE_context.h"
39 #include "BKE_global.h"
40 #include "BKE_lib_override.h"
41 #include "BKE_main.h"
42 #include "BKE_undo_system.h"
43 
44 #include "MEM_guardedalloc.h"
45 
46 #define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */
47 
49 #define WITH_GLOBAL_UNDO_KEEP_ONE
50 
52 #define WITH_GLOBAL_UNDO_ENSURE_UPDATED
53 
58 #define WITH_GLOBAL_UNDO_CORRECT_ORDER
59 
61 static CLG_LogRef LOG = {"bke.undosys"};
62 
63 /* -------------------------------------------------------------------- */
71 #define WITH_NESTED_UNDO_CHECK
72 
73 #ifdef WITH_NESTED_UNDO_CHECK
74 static bool g_undo_callback_running = false;
75 # define UNDO_NESTED_ASSERT(state) BLI_assert(g_undo_callback_running == state)
76 # define UNDO_NESTED_CHECK_BEGIN \
77  { \
78  UNDO_NESTED_ASSERT(false); \
79  g_undo_callback_running = true; \
80  } \
81  ((void)0)
82 # define UNDO_NESTED_CHECK_END \
83  { \
84  UNDO_NESTED_ASSERT(true); \
85  g_undo_callback_running = false; \
86  } \
87  ((void)0)
88 #else
89 # define UNDO_NESTED_ASSERT(state) ((void)0)
90 # define UNDO_NESTED_CHECK_BEGIN ((void)0)
91 # define UNDO_NESTED_CHECK_END ((void)0)
92 #endif
95 /* -------------------------------------------------------------------- */
108 /* UndoType */
109 
/* Return the first registered undo type whose poll succeeds for this context.
 * NOTE(review): the signature line (112) is missing from this extraction; per the
 * cross-reference it is: static const UndoType *BKE_undosys_type_from_context(bContext *C). */
111 
113 {
114  LISTBASE_FOREACH (const UndoType *, ut, &g_undo_types) {
115  /* No poll means we don't check context. */
116  if (ut->poll && ut->poll(C)) {
117  return ut;
118  }
119  }
/* No type polled true for this context. */
120  return NULL;
121 }
122 
123 /* -------------------------------------------------------------------- */
131 static void undosys_id_ref_store(void *UNUSED(user_data), UndoRefID *id_ref)
132 {
133  BLI_assert(id_ref->name[0] == '\0');
134  if (id_ref->ptr) {
135  BLI_strncpy(id_ref->name, id_ref->ptr->name, sizeof(id_ref->name));
136  /* Not needed, just prevents stale data access. */
137  id_ref->ptr = NULL;
138  }
139 }
140 
141 static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
142 {
143  /* Note: we could optimize this,
144  * for now it's not too bad since it only runs when we access undo! */
145  Main *bmain = user_data;
146  ListBase *lb = which_libbase(bmain, GS(id_ref->name));
147  LISTBASE_FOREACH (ID *, id, lb) {
148  if (STREQ(id_ref->name, id->name) && (id->lib == NULL)) {
149  id_ref->ptr = id;
150  break;
151  }
152  }
153 }
154 
/* Encode (capture) the current state into \a us via its type callback.
 * Returns true on success; on failure the caller is expected to free \a us.
 * NOTE(review): lines 158/160/165 are missing from this extraction — presumably the
 * UNDO_NESTED_CHECK_BEGIN/END guards and the step_foreach_ID_ref(us, undosys_id_ref_store, ...)
 * call — confirm against the original file. */
155 static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
156 {
157  CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
159  bool ok = us->type->step_encode(C, bmain, us);
161  if (ok) {
162  if (us->type->step_foreach_ID_ref != NULL) {
163  /* Don't use from context yet because sometimes context is fake and
164  * not all members are filled in. */
166  }
167 
168 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
/* Track the most recently encoded memfile step for later decode ordering. */
169  if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
170  ustack->step_active_memfile = us;
171  }
172 #endif
173  }
174  if (ok == false) {
175  CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
176  }
177  return ok;
178 }
179 
/* Decode (apply) an undo step's stored state, recursing first into the nearest
 * preceding memfile step when needed so ID references resolve correctly.
 * NOTE(review): the signature line (180) is missing from this extraction; per the
 * cross-reference it is:
 *   static void undosys_step_decode(bContext *C, Main *bmain, UndoStack *ustack,
 *                                   UndoStep *us, const eUndoStepDir dir, bool is_final) */
181  Main *bmain,
182  UndoStack *ustack,
183  UndoStep *us,
184  const eUndoStepDir dir,
185  bool is_final)
186 {
187  CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
188 
189  if (us->type->step_foreach_ID_ref) {
190 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
191  if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
/* Walk back to the nearest memfile step that this step's IDs depend on. */
192  for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
193  if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
194  if (us_iter == ustack->step_active_memfile) {
195  /* Common case, we're already using the last memfile state. */
196  }
197  else {
198  /* Load the previous memfile state so any ID's referenced in this
199  * undo step will be correctly resolved, see: T56163. */
200  undosys_step_decode(C, bmain, ustack, us_iter, dir, false);
201  /* May have been freed on memfile read. */
202  bmain = G_MAIN;
203  }
204  break;
205  }
206  }
207  }
208 #endif
209  /* Don't use from context yet because sometimes context is fake and
210  * not all members are filled in. */
/* NOTE(review): line 211 is missing here — presumably the
 * step_foreach_ID_ref(us, undosys_id_ref_resolve, bmain) call — confirm. */
212  }
213 
/* NOTE(review): lines 214/216 (likely UNDO_NESTED_CHECK_BEGIN/END) are missing. */
215  us->type->step_decode(C, bmain, us, dir, is_final);
217 
218 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
/* Remember which memfile state is now loaded. */
219  if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
220  ustack->step_active_memfile = us;
221  }
222 #endif
223 }
224 
/* Free a step's type-specific data, unlink it from the stack and release it.
 * NOTE(review): the signature line (225) is missing from this extraction; per the
 * cross-reference: static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us). */
226 {
227  CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
229  us->type->step_free(us);
231 
232  BLI_remlink(&ustack->steps, us);
233  MEM_freeN(us);
234 
235 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
/* Pointer-value comparison against the just-freed step (no dereference),
 * clears the cached memfile pointer so it never dangles. */
236  if (ustack->step_active_memfile == us) {
237  ustack->step_active_memfile = NULL;
238  }
239 #endif
240 }
241 
244 /* -------------------------------------------------------------------- */
/* Debug-only consistency check: the active step must be a member of the stack.
 * Compiled to a no-op stub when NDEBUG is defined.
 * NOTE(review): assert lines 252/256 are missing from this extraction — presumably
 * UNDO_NESTED_ASSERT(false) and a !BLI_listbase_is_empty() assert — confirm. */
248 #ifndef NDEBUG
249 static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
250 {
251  if (ustack->step_active != NULL) {
253  BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
254  }
255  if (expect_non_empty) {
257  }
258 }
259 #else
260 static void undosys_stack_validate(UndoStack *UNUSED(ustack), bool UNUSED(expect_non_empty))
261 {
262 }
263 #endif
264 
/* Allocate a new, zero-initialized undo stack. Caller owns the result and
 * must free it with #BKE_undosys_stack_destroy.
 * NOTE(review): signature line (265) missing; per the cross-reference:
 * UndoStack *BKE_undosys_stack_create(void). */
266 {
267  UndoStack *ustack = MEM_callocN(sizeof(UndoStack), __func__);
268  return ustack;
269 }
270 
/* Free every step in the stack, then the stack itself.
 * NOTE(review): signature line (271) missing; per the cross-reference:
 * void BKE_undosys_stack_destroy(UndoStack *ustack). */
272 {
273  BKE_undosys_stack_clear(ustack);
274  MEM_freeN(ustack);
275 }
276 
278 {
279  UNDO_NESTED_ASSERT(false);
280  CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
281  for (UndoStep *us = ustack->steps.last, *us_prev; us; us = us_prev) {
282  us_prev = us->prev;
283  undosys_step_free_and_unlink(ustack, us);
284  }
285  BLI_listbase_clear(&ustack->steps);
286  ustack->step_active = NULL;
287 }
288 
290 {
291  /* Remove active and all following undo-steps. */
292  UndoStep *us = ustack->step_active;
293 
294  if (us) {
295  ustack->step_active = us->prev;
296  bool is_not_empty = ustack->step_active != NULL;
297 
298  while (ustack->steps.last != ustack->step_active) {
299  UndoStep *us_iter = ustack->steps.last;
300  undosys_step_free_and_unlink(ustack, us_iter);
301  undosys_stack_validate(ustack, is_not_empty);
302  }
303  }
304 }
305 
306 /* Caller is responsible for handling active. */
/* Free steps from the stack tail down to (and including) \a us.
 * Asserts that the active step is never among those freed.
 * NOTE(review): signature line (307) missing; per the cross-reference:
 * static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us). */
308 {
309  if (us) {
310  bool is_not_empty = true;
311  UndoStep *us_iter;
312  do {
313  us_iter = ustack->steps.last;
314  BLI_assert(us_iter != ustack->step_active);
315  undosys_step_free_and_unlink(ustack, us_iter);
316  undosys_stack_validate(ustack, is_not_empty);
317  } while ((us != us_iter));
318  }
319 }
320 
321 static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
322 {
323  if (us && us == us_exclude) {
324  us = us->prev;
325  }
326 
327  if (us) {
328  bool is_not_empty = true;
329  UndoStep *us_iter;
330  do {
331  us_iter = ustack->steps.first;
332  if (us_iter == us_exclude) {
333  us_iter = us_iter->next;
334  }
335  BLI_assert(us_iter != ustack->step_active);
336  undosys_step_free_and_unlink(ustack, us_iter);
337  undosys_stack_validate(ustack, is_not_empty);
338  } while ((us != us_iter));
339  }
340 }
341 
/* Push a memfile (global) undo step using a temporary context wrapping \a bmain.
 * Returns true when the push succeeded.
 * NOTE(review): line 349 is missing from this extraction — presumably
 * `const UndoPushReturn ret = BKE_undosys_step_push_with_type(` whose argument
 * list continues on line 350 below — confirm against the original file. */
342 static bool undosys_stack_push_main(UndoStack *ustack, const char *name, struct Main *bmain)
343 {
344  UNDO_NESTED_ASSERT(false);
345  BLI_assert(ustack->step_init == NULL);
346  CLOG_INFO(&LOG, 1, "'%s'", name);
347  bContext *C_temp = CTX_create();
348  CTX_data_main_set(C_temp, bmain);
350  ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
351  CTX_free(C_temp);
352  return (ret & UNDO_PUSH_RET_SUCCESS);
353 }
354 
355 void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain)
356 {
357  UNDO_NESTED_ASSERT(false);
358  undosys_stack_push_main(ustack, IFACE_("Original"), bmain);
359 }
360 
361 /* called after 'BKE_undosys_stack_init_from_main' */
/* Push an additional "Original Mode" step for the context's undo type, unless
 * there is none or it is the memfile type (already covered by the main init).
 * NOTE(review): lines 362/364 are missing — presumably the signature
 * void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
 * and `const UndoType *ut = BKE_undosys_type_from_context(C);` — confirm. */
363 {
365  if (!ELEM(ut, NULL, BKE_UNDOSYS_TYPE_MEMFILE)) {
366  BKE_undosys_step_push_with_type(ustack, C, IFACE_("Original Mode"), ut);
367  }
368 }
369 
370 /* name optional */
371 bool BKE_undosys_stack_has_undo(UndoStack *ustack, const char *name)
372 {
373  if (name) {
374  UndoStep *us = BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
375  return us && us->prev;
376  }
377 
378  return !BLI_listbase_is_empty(&ustack->steps);
379 }
380 
/* Walk back from the active step and return the nearest step of type \a ut,
 * or NULL when none exists.
 * NOTE(review): signature line (381) missing; per the cross-reference:
 * UndoStep *BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut). */
382 {
383  UndoStep *us = ustack->step_active;
384  while (us && (us->type != ut)) {
385  us = us->prev;
386  }
387  return us;
388 }
389 
/* Prefer the pending (initialized but not yet pushed) step when it matches
 * \a ut, otherwise fall back to the nearest active step of that type.
 * NOTE(review): signature line (390) missing; per the cross-reference:
 * UndoStep *BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut). */
391 {
392  UNDO_NESTED_ASSERT(false);
393  CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
394  if (ustack->step_init && (ustack->step_init->type == ut)) {
395  return ustack->step_init;
396  }
397  return BKE_undosys_stack_active_with_type(ustack, ut);
398 }
399 
/* Trim old undo steps so the stack respects the step-count and/or memory
 * limits (steps == -1 means unlimited; memory_limit == 0 means unlimited).
 * NOTE(review): signature lines (400-404) missing; per the cross-reference:
 * void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit). */
405 {
406  UNDO_NESTED_ASSERT(false);
/* NOTE(review): early-return when steps is unlimited but a memory limit IS set
 * looks suspicious (it skips memory-based trimming) — confirm against upstream. */
407  if ((steps == -1) && (memory_limit != 0)) {
408  return;
409  }
410 
411  CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
412  UndoStep *us;
413  UndoStep *us_exclude = NULL;
414  /* keep at least two (original + other) */
415  size_t data_size_all = 0;
416  size_t us_count = 0;
/* Walk backward from the newest step, stopping at whichever limit hits first. */
417  for (us = ustack->steps.last; us && us->prev; us = us->prev) {
418  if (memory_limit) {
419  data_size_all += us->data_size;
420  if (data_size_all > memory_limit) {
421  break;
422  }
423  }
424  if (steps != -1) {
/* NOTE(review): size_t vs int comparison — fine since steps >= 0 here. */
425  if (us_count == steps) {
426  break;
427  }
/* Skipped (hidden) steps don't count toward the user-visible step limit. */
428  if (us->skip == false) {
429  us_count += 1;
430  }
431  }
432  }
433 
434  if (us) {
435 #ifdef WITH_GLOBAL_UNDO_KEEP_ONE
436  /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
437  if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
438  us_exclude = us->prev;
439  while (us_exclude && us_exclude->type != BKE_UNDOSYS_TYPE_MEMFILE) {
440  us_exclude = us_exclude->prev;
441  }
442  /* Once this is outside the given number of 'steps', undoing onto this state
443  * may skip past many undo steps which is confusing, instead,
444  * disallow stepping onto this state entirely. */
445  if (us_exclude) {
446  us_exclude->skip = true;
447  }
448  }
449 #endif
450  /* Free from first to last, free functions may update de-duplication info
451  * (see #MemFileUndoStep). */
452  undosys_stack_clear_all_first(ustack, us->prev, us_exclude);
453  }
454 }
455 
/* Begin a deferred (two-phase) undo push for types that implement
 * step_encode_init; returns the pending step, or NULL when unsupported.
 * NOTE(review): the first signature line (457-458) is missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack, ... below). Line 470
 * (inside the step_active branch — likely undosys_stack_clear_all_last(ustack,
 * ustack->step_active->next)) is also missing — confirm. */
459  bContext *C,
460  const char *name,
461  const UndoType *ut)
462 {
463  UNDO_NESTED_ASSERT(false);
464  /* We could detect and clean this up (but it should never happen!). */
465  BLI_assert(ustack->step_init == NULL);
466  if (ut->step_encode_init) {
467  undosys_stack_validate(ustack, false);
468 
469  if (ustack->step_active) {
471  }
472 
473  UndoStep *us = MEM_callocN(ut->step_size, __func__);
474  if (name != NULL) {
475  BLI_strncpy(us->name, name, sizeof(us->name));
476  }
477  us->type = ut;
/* Held as 'step_init' until the matching push completes the step. */
478  ustack->step_init = us;
479  CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
480  ut->step_encode_init(C, us);
481  undosys_stack_validate(ustack, false);
482  return us;
483  }
484 
485  return NULL;
486 }
487 
/* Convenience wrapper: begin a deferred push using the context's undo type.
 * NOTE(review): signature line (488) and line 493 (likely
 * `const UndoType *ut = BKE_undosys_type_from_context(C);`) are missing — confirm. */
489 {
490  UNDO_NESTED_ASSERT(false);
491  /* We could detect and clean this up (but it should never happen!). */
492  BLI_assert(ustack->step_init == NULL);
494  if (ut == NULL) {
495  return NULL;
496  }
497  return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
498 }
499 
/* Core push: truncate redo history, encode a new step of type \a ut, and make
 * it active; may add hidden memfile steps to keep global undo consistent.
 * NOTE(review): several lines are missing from this extraction — 503 (signature
 * first line), 508 (likely `UndoPushReturn retval = UNDO_PUSH_RET_FAILURE;`),
 * 513 and 517-518 (likely the lib-override operations-create call whose closing
 * brace is line 519, OR-ing UNDO_PUSH_RET_OVERRIDE_CHANGED into retval) — confirm. */
504  bContext *C,
505  const char *name,
506  const UndoType *ut)
507 {
509 
510  UNDO_NESTED_ASSERT(false);
511  undosys_stack_validate(ustack, false);
512  bool is_not_empty = ustack->step_active != NULL;
514 
515  /* Might not be final place for this to be called - probably only want to call it from some
516  * undo handlers, not all of them? */
519  }
520 
521  /* Remove all undo-steps after (also when 'ustack->step_active == NULL'). */
522  while (ustack->steps.last != ustack->step_active) {
523  UndoStep *us_iter = ustack->steps.last;
524  undosys_step_free_and_unlink(ustack, us_iter);
525  undosys_stack_validate(ustack, is_not_empty);
526  }
527 
528  if (ustack->step_active) {
529  BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
530  }
531 
532 #ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED
/* Ensure a memfile baseline exists before pushing an ID-referencing step. */
533  if (ut->step_foreach_ID_ref != NULL) {
534  if (G_MAIN->is_memfile_undo_written == false) {
535  const char *name_internal = "MemFile Internal (pre)";
536  /* Don't let 'step_init' cause issues when adding memfile undo step. */
537  void *step_init = ustack->step_init;
538  ustack->step_init = NULL;
539  const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
540  /* Restore 'step_init'. */
541  ustack->step_init = step_init;
542  if (ok) {
543  UndoStep *us = ustack->steps.last;
544  BLI_assert(STREQ(us->name, name_internal));
/* Internal baseline step: hidden from the user. */
545  us->skip = true;
546 # ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
547  ustack->step_active_memfile = us;
548 # endif
549  }
550  }
551  }
552 #endif
553 
554  bool use_memfile_step = false;
555  {
/* Reuse the pending 'step_init' step when present, else allocate fresh. */
556  UndoStep *us = ustack->step_init ? ustack->step_init : MEM_callocN(ut->step_size, __func__);
557  ustack->step_init = NULL;
558  if (us->name[0] == '\0') {
559  BLI_strncpy(us->name, name, sizeof(us->name));
560  }
561  us->type = ut;
562  /* True by default, code needs to explicitly set it to false if necessary. */
563  us->use_old_bmain_data = true;
564  /* Initialized, not added yet. */
565 
566  CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
567 
568  if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
569  MEM_freeN(us);
570  undosys_stack_validate(ustack, true);
571  return retval;
572  }
573  ustack->step_active = us;
574  BLI_addtail(&ustack->steps, us);
575  use_memfile_step = us->use_memfile_step;
576  }
577 
578  if (use_memfile_step) {
579  /* Make this the user visible undo state, so redo always applies
580  * on top of the mem-file undo instead of skipping it. see: T67256. */
581  UndoStep *us_prev = ustack->step_active;
582  const char *name_internal = us_prev->name;
583  const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
584  if (ok) {
585  UndoStep *us = ustack->steps.last;
586  BLI_assert(STREQ(us->name, name_internal));
587  us_prev->skip = true;
588 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
589  ustack->step_active_memfile = us;
590 #endif
591  ustack->step_active = us;
592  }
593  }
594 
595  if (ustack->group_level > 0) {
596  /* Temporarily set skip for the active step.
597  * This is an invalid state which must be corrected once the last group ends. */
598  ustack->step_active->skip = true;
599  }
600 
601  undosys_stack_validate(ustack, true);
602  return (retval | UNDO_PUSH_RET_SUCCESS);
603 }
604 
/* Push using the pending step's type when one exists, else the context's type.
 * NOTE(review): signature line (605) and the continuation of line 608 (line 609,
 * likely `BKE_undosys_type_from_context(C);`) are missing — confirm. */
606 {
607  UNDO_NESTED_ASSERT(false);
608  const UndoType *ut = ustack->step_init ? ustack->step_init->type :
610  if (ut == NULL) {
611  return false;
612  }
613  return BKE_undosys_step_push_with_type(ustack, C, name, ut);
614 }
615 
/* Return the next step (toward redo) sharing \a us's type, or NULL.
 * NOTE(review): signature line (619) missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_same_type_next(UndoStep *us). */
620 {
621  if (us) {
622  const UndoType *ut = us->type;
623  while ((us = us->next)) {
624  if (us->type == ut) {
625  return us;
626  }
627  }
628  }
/* NULL input or no later step of the same type. */
629  return us;
630 }
631 
/* Return the previous step (toward undo) sharing \a us's type, or NULL.
 * NOTE(review): signature line (635) missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_same_type_prev(UndoStep *us). */
636 {
637  if (us) {
638  const UndoType *ut = us->type;
639  while ((us = us->prev)) {
640  if (us->type == ut) {
641  return us;
642  }
643  }
644  }
/* NULL input or no earlier step of the same type. */
645  return us;
646 }
647 
/* Find the newest step matching both \a name and \a ut, searching tail-first.
 * NOTE(review): the first signature line (648) is missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, ... below). */
649  const char *name,
650  const UndoType *ut)
651 {
652  for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
653  if (us->type == ut) {
654  if (STREQ(name, us->name)) {
655  return us;
656  }
657  }
658  }
659  return NULL;
660 }
661 
/* Find the newest step with the given name (reverse search), or NULL.
 * NOTE(review): signature line (662) missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name). */
663 {
664  return BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
665 }
666 
/* Find the newest step of the given type (reverse search), or NULL.
 * NOTE(review): signature line (667) missing; per the cross-reference:
 * UndoStep *BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut). */
668 {
669  for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
670  if (us->type == ut) {
671  return us;
672  }
673  }
674  return NULL;
675 }
676 
/* Determine whether reaching \a us_target from \a us_reference is an undo
 * (backward) or redo (forward) operation; STEP_INVALID on stack corruption.
 * NOTE(review): the first signature line (685) is missing; per the cross-reference:
 * eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, ... below). */
686  const UndoStep *us_target,
687  const UndoStep *us_reference)
688 {
689  if (us_reference == NULL) {
690  us_reference = ustack->step_active;
691  }
692 
693  BLI_assert(us_reference != NULL);
694 
695  /* Note that we use heuristics to make this lookup as fast as possible in most common cases,
696  * assuming that:
697  * - Most cases are just undo or redo of one step from active one.
698  * - Otherwise, it is typically faster to check future steps since active one is usually close
699  * to the end of the list, rather than its start. */
700  /* NOTE: in case target step is the active one, we assume we are in an undo case... */
701  if (ELEM(us_target, us_reference, us_reference->prev)) {
702  return STEP_UNDO;
703  }
704  if (us_target == us_reference->next) {
705  return STEP_REDO;
706  }
707 
708  /* Search forward, and then backward. */
709  for (UndoStep *us_iter = us_reference->next; us_iter != NULL; us_iter = us_iter->next) {
710  if (us_iter == us_target) {
711  return STEP_REDO;
712  }
713  }
714  for (UndoStep *us_iter = us_reference->prev; us_iter != NULL; us_iter = us_iter->prev) {
715  if (us_iter == us_target) {
716  return STEP_UNDO;
717  }
718  }
719 
720  BLI_assert(!"Target undo step not found, this should not happen and may indicate an undo stack corruption");
721  return STEP_INVALID;
722 }
723 
/* Core undo/redo driver: step from \a us_reference to \a us_target, decoding
 * each step along the way; optionally skip past 'skip' steps. Returns true
 * on success, false on invalid input or stack corruption.
 * NOTE(review): the first signature line (738) is missing; per the cross-reference:
 * bool BKE_undosys_step_load_data_ex(UndoStack *ustack, ... below). */
739  bContext *C,
740  UndoStep *us_target,
741  UndoStep *us_reference,
742  const bool use_skip)
743 {
744  UNDO_NESTED_ASSERT(false);
745  if (us_target == NULL) {
746  CLOG_ERROR(&LOG, "called with a NULL target step");
747  return false;
748  }
749  undosys_stack_validate(ustack, true);
750 
751  if (us_reference == NULL) {
752  us_reference = ustack->step_active;
753  }
754  if (us_reference == NULL) {
755  CLOG_ERROR(&LOG, "could not find a valid initial active target step as reference");
756  return false;
757  }
758 
759  /* This considers we are in undo case if both `us_target` and `us_reference` are the same. */
760  const eUndoStepDir undo_dir = BKE_undosys_step_calc_direction(ustack, us_target, us_reference);
761  BLI_assert(undo_dir != STEP_INVALID);
762 
763  /* This will be the active step once the undo process is complete.
764  *
765  * In case we do skip 'skipped' steps, the final active step may be several steps backward from
766  * the one passed as parameter. */
767  UndoStep *us_target_active = us_target;
768  if (use_skip) {
/* (undo_dir == -1) means STEP_UNDO, i.e. walk backward over skip-steps. */
769  while (us_target_active != NULL && us_target_active->skip) {
770  us_target_active = (undo_dir == -1) ? us_target_active->prev : us_target_active->next;
771  }
772  if (us_target_active == NULL) {
773  CLOG_INFO(&LOG,
774  2,
775  "undo/redo did not find a step after stepping over skip-steps "
776  "(undo limit exceeded)");
777  return false;
778  }
779  }
780 
781  CLOG_INFO(&LOG,
782  1,
783  "addr=%p, name='%s', type='%s', undo_dir=%d",
784  us_target,
785  us_target->name,
786  us_target->type->name,
787  undo_dir);
788 
789  /* Undo/Redo steps until we reach given target step (or beyond if it has to be skipped), from
790  * given reference step.
791  *
792  * NOTE: Unlike with redo case, where we can expect current active step to fully reflect current
793  * data status, in undo case we also do reload the active step.
794  * FIXME: this feels weak, and should probably not be actually needed? Or should also be done in
795  * redo case? */
796  bool is_processing_extra_skipped_steps = false;
797  for (UndoStep *us_iter = (undo_dir == -1) ? us_reference : us_reference->next; us_iter != NULL;
798  us_iter = (undo_dir == -1) ? us_iter->prev : us_iter->next) {
799  BLI_assert(us_iter != NULL);
800 
801  const bool is_final = (us_iter == us_target_active);
802 
803  if (!is_final && is_processing_extra_skipped_steps) {
804  BLI_assert(us_iter->skip == true);
805  CLOG_INFO(&LOG,
806  2,
807  "undo/redo continue with skip addr=%p, name='%s', type='%s'",
808  us_iter,
809  us_iter->name,
810  us_iter->type->name);
811  }
812 
813  undosys_step_decode(C, G_MAIN, ustack, us_iter, undo_dir, is_final);
/* Each decoded step becomes the active one as we walk. */
814  ustack->step_active = us_iter;
815 
816  if (us_iter == us_target) {
817  is_processing_extra_skipped_steps = true;
818  }
819 
820  if (is_final) {
821  /* Undo/Redo process is finished and successful. */
822  return true;
823  }
824  }
825 
826  BLI_assert(
827  !"This should never be reached, either undo stack is corrupted, or code above is buggy");
828  return false;
829 }
830 
/* Load a specific step, using the active step as reference and no skipping.
 * NOTE(review): signature line (834) missing; per the cross-reference:
 * bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us_target). */
835 {
836  /* Note that here we do not skip 'skipped' steps by default. */
837  return BKE_undosys_step_load_data_ex(ustack, C, us_target, NULL, false);
838 }
839 
844 void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
845 {
846  UndoStep *us_target = BLI_findlink(&ustack->steps, index);
847  BLI_assert(us_target->skip == false);
848  BKE_undosys_step_load_data(ustack, C, us_target);
849 }
850 
/* Undo onto \a us_target, using the active step (or the target itself when
 * none is active) as reference.
 * NOTE(review): the first signature line (862) is missing; per the cross-reference:
 * bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack, ... below). */
863  bContext *C,
864  UndoStep *us_target,
865  bool use_skip)
866 {
867  /* In case there is no active step, we consider we just load given step, so reference must be
868  * itself (due to weird 'load current active step in undo case' thing, see comments in
869  * #BKE_undosys_step_load_data_ex). */
870  UndoStep *us_reference = ustack->step_active != NULL ? ustack->step_active : us_target;
871 
/* -1 == STEP_UNDO. */
872  BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == -1);
873 
874  return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
875 }
876 
/* Undo onto \a us_target, stepping over skipped steps.
 * NOTE(review): signature line (882) missing; per the cross-reference:
 * bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target). */
883 {
884  return BKE_undosys_step_undo_with_data_ex(ustack, C, us_target, true);
885 }
886 
/* Undo one step back from the active step; false when nothing to undo.
 * NOTE(review): signature line (890) missing; per the cross-reference:
 * bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C). */
891 {
892  if (ustack->step_active != NULL) {
893  return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active->prev);
894  }
895  return false;
896 }
897 
/* Redo onto \a us_target, using the active step (or the target's predecessor
 * when none is active) as reference.
 * NOTE(review): the first signature line (909) is missing; per the cross-reference:
 * bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack, ... below). */
910  bContext *C,
911  UndoStep *us_target,
912  bool use_skip)
913 {
914  /* In case there is no active step, we consider we just load given step, so reference must be
915  * the previous one. */
916  UndoStep *us_reference = ustack->step_active != NULL ? ustack->step_active : us_target->prev;
917 
/* 1 == STEP_REDO. */
918  BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == 1);
919 
920  return BKE_undosys_step_load_data_ex(ustack, C, us_target, us_reference, use_skip);
921 }
922 
/* Redo onto \a us_target, stepping over skipped steps.
 * NOTE(review): signature line (928) missing; per the cross-reference:
 * bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target). */
929 {
930  return BKE_undosys_step_redo_with_data_ex(ustack, C, us_target, true);
931 }
932 
/* Redo one step forward from the active step; false when nothing to redo.
 * NOTE(review): signature line (936) missing; per the cross-reference:
 * bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C). */
937 {
938  if (ustack->step_active != NULL) {
939  return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active->next);
940  }
941  return false;
942 }
943 
/* Allocate a new UndoType, let \a undosys_fn fill in its callbacks, and
 * register it in the global type list.
 * NOTE(review): signature line (947) and line 955 (likely
 * `BLI_addtail(&g_undo_types, ut);`) are missing from this extraction — confirm. */
948 {
949  UndoType *ut;
950 
951  ut = MEM_callocN(sizeof(UndoType), __func__);
952 
953  undosys_fn(ut);
954 
956 
957  return ut;
958 }
959 
/* Free every registered undo type (called on exit).
 * NOTE(review): signature line (960) missing; per the cross-reference:
 * void BKE_undosys_type_free_all(void). */
961 {
962  UndoType *ut;
963  while ((ut = BLI_pophead(&g_undo_types))) {
964  MEM_freeN(ut);
965  }
966 }
967 
970 /* -------------------------------------------------------------------- */
/* Open an undo group: pushes inside a group are marked 'skip' until the
 * outermost group ends. Groups may nest.
 * NOTE(review): signature line (989) missing; per the cross-reference:
 * void BKE_undosys_stack_group_begin(UndoStack *ustack). */
990 {
991  BLI_assert(ustack->group_level >= 0);
992  ustack->group_level += 1;
993 }
994 
/* Close an undo group; when the outermost group ends, clear the temporary
 * 'skip' flag on the active step so it becomes user-visible again.
 * NOTE(review): signature line (995) missing; per the cross-reference:
 * void BKE_undosys_stack_group_end(UndoStack *ustack). */
996 {
997  ustack->group_level -= 1;
998  BLI_assert(ustack->group_level >= 0);
999 
1000  if (ustack->group_level == 0) {
1001  if (LIKELY(ustack->step_active != NULL)) {
1002  ustack->step_active->skip = false;
1003  }
1004  }
1005 }
1006 
1009 /* -------------------------------------------------------------------- */
1015 static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
1016  UndoTypeForEachIDRefFn foreach_ID_ref_fn,
1017  void *user_data))
1018 {
1019  LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
1020  const UndoType *ut = us->type;
1021  if (ut->step_foreach_ID_ref != NULL) {
1022  ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
1023  }
1024  }
1025 }
1026 
1029 /* -------------------------------------------------------------------- */
/* Debug helper: dump the whole undo stack to stdout, one line per step with
 * flag markers (see header printed below).
 * NOTE(review): signature line (1033) missing; per the cross-reference:
 * void BKE_undosys_print(UndoStack *ustack). */
1034 {
1035  printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
1036  BLI_listbase_count(&ustack->steps));
1037  int index = 0;
1038  LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
1039  printf("[%c%c%c%c] %3d {%p} type='%s', name='%s'\n",
1040  (us == ustack->step_active) ? '*' : ' ',
1041  us->is_applied ? '#' : ' ',
1042  (us == ustack->step_active_memfile) ? 'M' : ' ',
1043  us->skip ? 'S' : ' ',
1044  index,
1045  (void *)us,
1046  us->type->name,
1047  us->name);
1048  index++;
1049  }
1050 }
1051 
bContext * CTX_create(void)
Definition: context.c:105
void CTX_free(bContext *C)
Definition: context.c:119
void CTX_data_main_set(bContext *C, struct Main *bmain)
Definition: context.c:1028
#define G_MAIN
Definition: BKE_global.h:232
bool BKE_lib_override_library_main_operations_create(struct Main *bmain, const bool force_auto)
struct ListBase * which_libbase(struct Main *bmain, short type)
Definition: main.c:447
eUndoStepDir
@ STEP_INVALID
@ STEP_UNDO
@ STEP_REDO
UndoPushReturn
@ UNDO_PUSH_RET_SUCCESS
@ UNDO_PUSH_RET_OVERRIDE_CHANGED
@ UNDO_PUSH_RET_FAILURE
@ UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE
void(* UndoTypeForEachIDRefFn)(void *user_data, struct UndoRefID *id_ref)
#define BLI_assert(a)
Definition: BLI_assert.h:58
BLI_INLINE bool BLI_listbase_is_empty(const struct ListBase *lb)
Definition: BLI_listbase.h:124
void * BLI_pophead(ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:257
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
BLI_INLINE void BLI_listbase_clear(struct ListBase *lb)
Definition: BLI_listbase.h:128
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
void BLI_remlink(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:133
int BLI_findindex(const struct ListBase *listbase, const void *vlink) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void * BLI_findlink(const struct ListBase *listbase, int number) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
void * BLI_rfindstring(const struct ListBase *listbase, const char *id, const int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
char * BLI_strncpy(char *__restrict dst, const char *__restrict src, const size_t maxncpy) ATTR_NONNULL()
Definition: string.c:108
#define UNUSED(x)
#define ELEM(...)
#define STREQ(a, b)
#define LIKELY(x)
#define IFACE_(msgid)
#define CLOG_ERROR(clg_ref,...)
Definition: CLG_log.h:204
#define CLOG_INFO(clg_ref, level,...)
Definition: CLG_log.h:201
These structs are the foundation for all linked lists in the library system.
Read Guarded memory(de)allocation.
#define C
Definition: RandGen.cpp:39
void * user_data
#define GS(x)
Definition: iris.c:241
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
return ret
static const int steps
Definition: sky_nishita.cpp:28
CLG_LogType * type
Definition: CLG_log.h:120
Definition: DNA_ID.h:273
struct Library * lib
Definition: DNA_ID.h:277
char name[66]
Definition: DNA_ID.h:283
void * last
Definition: DNA_listBase.h:47
void * first
Definition: DNA_listBase.h:47
Definition: BKE_main.h:116
char name[MAX_ID_NAME]
struct ID * ptr
struct UndoStep * step_active
struct UndoStep * step_active_memfile
struct UndoStep * step_init
ListBase steps
const struct UndoType * type
size_t data_size
struct UndoStep * prev
bool use_old_bmain_data
struct UndoStep * next
bool use_memfile_step
char name[64]
size_t step_size
void(* step_decode)(struct bContext *C, struct Main *bmain, UndoStep *us, const eUndoStepDir dir, bool is_final)
bool(* step_encode)(struct bContext *C, struct Main *bmain, UndoStep *us)
void(* step_encode_init)(struct bContext *C, UndoStep *us)
void(* step_foreach_ID_ref)(UndoStep *us, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
const char * name
void(* step_free)(UndoStep *us)
static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
Definition: undo_system.c:141
UndoType * BKE_undosys_type_append(void(*undosys_fn)(UndoType *))
Definition: undo_system.c:947
UndoStep * BKE_undosys_step_push_init_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
Definition: undo_system.c:458
UndoStep * BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name)
Definition: undo_system.c:488
bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
Definition: undo_system.c:862
const UndoType * BKE_UNDOSYS_TYPE_SCULPT
Definition: undo_system.c:104
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
Definition: undo_system.c:928
UndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
Definition: undo_system.c:503
void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
Definition: undo_system.c:362
bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C)
Definition: undo_system.c:936
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
Definition: undo_system.c:249
bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C)
Definition: undo_system.c:890
void BKE_undosys_stack_clear_active(UndoStack *ustack)
Definition: undo_system.c:289
#define UNDO_NESTED_CHECK_END
Definition: undo_system.c:82
static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data))
Definition: undo_system.c:1015
static const UndoType * BKE_undosys_type_from_context(bContext *C)
Definition: undo_system.c:112
static ListBase g_undo_types
Definition: undo_system.c:110
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
Definition: undo_system.c:882
UndoStack * BKE_undosys_stack_create(void)
Definition: undo_system.c:265
static bool g_undo_callback_running
Definition: undo_system.c:74
#define UNDO_NESTED_CHECK_BEGIN
Definition: undo_system.c:76
eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, const UndoStep *us_target, const UndoStep *us_reference)
Definition: undo_system.c:685
UndoStep * BKE_undosys_step_same_type_prev(UndoStep *us)
Definition: undo_system.c:635
UndoStep * BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut)
Definition: undo_system.c:390
static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us)
Definition: undo_system.c:307
const UndoType * BKE_UNDOSYS_TYPE_MEMFILE
Definition: undo_system.c:101
const UndoType * BKE_UNDOSYS_TYPE_PARTICLE
Definition: undo_system.c:103
static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
Definition: undo_system.c:321
void BKE_undosys_stack_clear(UndoStack *ustack)
Definition: undo_system.c:277
UndoPushReturn BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char *name)
Definition: undo_system.c:605
const UndoType * BKE_UNDOSYS_TYPE_TEXT
Definition: undo_system.c:105
UndoStep * BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name)
Definition: undo_system.c:662
void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit)
Definition: undo_system.c:404
UndoStep * BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut)
Definition: undo_system.c:381
bool BKE_undosys_step_load_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, UndoStep *us_reference, const bool use_skip)
Definition: undo_system.c:738
UndoStep * BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, const char *name, const UndoType *ut)
Definition: undo_system.c:648
static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
Definition: undo_system.c:225
void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index)
Definition: undo_system.c:844
UndoStep * BKE_undosys_step_same_type_next(UndoStep *us)
Definition: undo_system.c:619
bool BKE_undosys_stack_has_undo(UndoStack *ustack, const char *name)
Definition: undo_system.c:371
UndoStep * BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut)
Definition: undo_system.c:667
static void undosys_id_ref_store(void *UNUSED(user_data), UndoRefID *id_ref)
Definition: undo_system.c:131
void BKE_undosys_stack_destroy(UndoStack *ustack)
Definition: undo_system.c:271
#define UNDO_NESTED_ASSERT(state)
Definition: undo_system.c:75
void BKE_undosys_stack_group_end(UndoStack *ustack)
Definition: undo_system.c:995
static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
Definition: undo_system.c:155
static CLG_LogRef LOG
Definition: undo_system.c:61
static bool undosys_stack_push_main(UndoStack *ustack, const char *name, struct Main *bmain)
Definition: undo_system.c:342
bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us_target)
Definition: undo_system.c:834
const UndoType * BKE_UNDOSYS_TYPE_PAINTCURVE
Definition: undo_system.c:102
void BKE_undosys_type_free_all(void)
Definition: undo_system.c:960
bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack, bContext *C, UndoStep *us_target, bool use_skip)
Definition: undo_system.c:909
void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain)
Definition: undo_system.c:355
void BKE_undosys_print(UndoStack *ustack)
Definition: undo_system.c:1033
static void undosys_step_decode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us, const eUndoStepDir dir, bool is_final)
Definition: undo_system.c:180
void BKE_undosys_stack_group_begin(UndoStack *ustack)
Definition: undo_system.c:989
const UndoType * BKE_UNDOSYS_TYPE_IMAGE
Definition: undo_system.c:100
size_t memory_limit
Definition: wm_playanim.c:267