Blender V4.5
BLI_mempool.cc
/* SPDX-FileCopyrightText: 2008 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 *
 * Simple, fast memory allocator for allocating many elements of the same size.
 *
 * Supports:
 *
 * - Freeing chunks.
 * - Iterating over allocated chunks
 *   (optionally when using the #BLI_MEMPOOL_ALLOW_ITER flag).
 */

#include <algorithm>
#include <cstdlib>
#include <cstring>

#include "atomic_ops.h"

#include "BLI_utildefines.h"

#include "BLI_asan.h"
#include "BLI_math_base.h"
#include "BLI_mempool.h"         /* own include */
#include "BLI_mempool_private.h" /* own include */

#ifdef WITH_ASAN
#  include "BLI_threads.h"
#endif

#include "MEM_guardedalloc.h"

#ifdef WITH_MEM_VALGRIND
#  include "valgrind/memcheck.h"
#endif

#include "BLI_strict_flags.h" /* IWYU pragma: keep. Keep last. */

#ifdef WITH_ASAN
#  define POISON_REDZONE_SIZE 32
#else
#  define POISON_REDZONE_SIZE 0
#endif

/* NOTE: copied from BLO_core_bhead.hh, don't use here because we're in BLI. */
#ifdef __BIG_ENDIAN__
/* Big Endian */
#  define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
     (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
#else
/* Little Endian */
#  define MAKE_ID(a, b, c, d) (int(d) << 24 | int(c) << 16 | (b) << 8 | (a))
#  define MAKE_ID_8(a, b, c, d, e, f, g, h) \
    (int64_t(h) << 56 | int64_t(g) << 48 | int64_t(f) << 40 | int64_t(e) << 32 | \
     int64_t(d) << 24 | int64_t(c) << 16 | int64_t(b) << 8 | (a))
#endif

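/* Example (illustrative, not from the original source): on a little-endian
 * build MAKE_ID('u', 's', 'e', 'd') evaluates to 0x64657375, whose bytes in
 * memory order read "used". MAKE_ID_8 packs eight characters the same way
 * into a 64-bit word. */
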
/**
 * Magic word stored in #BLI_freenode.freeword to mark an element as freed,
 * used by pool iterators and double-free checks to recognize elements
 * currently on the free list.
 */
#define FREEWORD \
  ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
                                        MAKE_ID('e', 'f', 'f', 'e'))

/**
 * The value written to #BLI_freenode.freeword while an element is in use;
 * it only needs to differ from #FREEWORD.
 */
#define USEDWORD MAKE_ID('u', 's', 'e', 'd')

/* optimize pool size */
#define USE_CHUNK_POW2

#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif

/**
 * A free element from #BLI_mempool.chunks. Data is cast to this type and stored in
 * #BLI_mempool.free as a single linked list, each item #BLI_mempool.esize large.
 *
 * Each element represents a block which #BLI_mempool_alloc may return.
 */
struct BLI_freenode {
  BLI_freenode *next;
  /** Used to identify this as a freed node. */
  intptr_t freeword;
};

/**
 * A chunk of memory in the mempool stored in
 * #BLI_mempool.chunks as a single linked list.
 */
struct BLI_mempool_chunk {
  BLI_mempool_chunk *next;
};

/**
 * The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
 */
struct BLI_mempool {
#ifdef WITH_ASAN
  /** Serialize access to memory-pools when debugging with ASAN. */
  ThreadMutex mutex;
#endif
  /** Single linked list of allocated chunks. */
  BLI_mempool_chunk *chunks;
  /** Keep a pointer to the last, so we can append new chunks there. */
  BLI_mempool_chunk *chunk_tail;

  /** Element size in bytes. */
  uint esize;
  /** Chunk size in bytes. */
  uint csize;
  /** Number of elements per chunk. */
  uint pchunk;
  uint flag;

  /** Free element list. Interleaved into chunk data. */
  BLI_freenode *free;
  /** Use to know how many chunks to keep for #BLI_mempool_clear. */
  uint maxchunks;
  /** Number of elements currently in use. */
  uint totused;
};

#define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

#define CHUNK_DATA(chunk) \
  ((BLI_freenode *)(CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1)))

#define NODE_STEP_NEXT(node) ((BLI_freenode *)((char *)(node) + esize))
#define NODE_STEP_PREV(node) ((BLI_freenode *)((char *)(node) - esize))

/** Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD uint(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))

static void mempool_asan_unlock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_mutex_unlock(&pool->mutex);
#else
  UNUSED_VARS(pool);
#endif
}

static void mempool_asan_lock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_mutex_lock(&pool->mutex);
#else
  UNUSED_VARS(pool);
#endif
}

BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
  while (index-- && head) {
    head = head->next;
  }
  return head;
}

/**
 * \return the number of chunks to allocate based on how many elements are needed.
 *
 * \note for small pools 1 is a good default, the elements need to be initialized,
 * adding overhead on creation which is redundant if they aren't used.
 */
BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
{
  return (elem_num <= pchunk) ? 1 : ((elem_num / pchunk) + 1);
}
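
/* Worked example (illustrative, not from the original source): with
 * `elem_num = 250` and `pchunk = 100` this returns (250 / 100) + 1 = 3,
 * rounding up so the pool never reserves too few chunks. */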

static BLI_mempool_chunk *mempool_chunk_alloc(const BLI_mempool *pool)
{
  return static_cast<BLI_mempool_chunk *>(
      MEM_mallocN(sizeof(BLI_mempool_chunk) + size_t(pool->csize), "mempool chunk"));
}

/**
 * Initialize a chunk and add it to \a pool.
 *
 * \param mpchunk: The new chunk, must already be allocated.
 * \param last_tail: The last element of the previous chunk
 * (used when building free chunks initially).
 * \return The last free element of the chunk.
 */
static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
                                       BLI_mempool_chunk *mpchunk,
                                       BLI_freenode *last_tail)
{
  const uint esize = pool->esize;
  BLI_freenode *curnode = CHUNK_DATA(mpchunk);
  uint j;

  /* append */
  if (pool->chunk_tail) {
    pool->chunk_tail->next = mpchunk;
  }
  else {
    BLI_assert(pool->chunks == nullptr);
    pool->chunks = mpchunk;
  }

  mpchunk->next = nullptr;
  pool->chunk_tail = mpchunk;

  if (UNLIKELY(pool->free == nullptr)) {
    pool->free = curnode;
  }

  /* loop through the allocated data, building the pointer structures */
  j = pool->pchunk;
  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    while (j--) {
      BLI_freenode *next;

      BLI_asan_unpoison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif
      curnode->next = next = NODE_STEP_NEXT(curnode);
      curnode->freeword = FREEWORD;

      BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif
      curnode = next;
    }
  }
  else {
    while (j--) {
      BLI_freenode *next;

      BLI_asan_unpoison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif
      curnode->next = next = NODE_STEP_NEXT(curnode);
      BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif

      curnode = next;
    }
  }

  /* terminate the list (rewind one)
   * will be overwritten if 'curnode' gets passed in again as 'last_tail' */

  if (POISON_REDZONE_SIZE > 0) {
    BLI_asan_unpoison(curnode, pool->esize);
    BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
    VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif
  }

  curnode = NODE_STEP_PREV(curnode);

  BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_DEFINED(curnode, pool->esize - POISON_REDZONE_SIZE);
#endif

  curnode->next = nullptr;
  BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_UNDEFINED(curnode, pool->esize);
#endif

  /* final pointer in the previously allocated chunk is wrong */
  if (last_tail) {
    BLI_asan_unpoison(last_tail, pool->esize - POISON_REDZONE_SIZE);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(last_tail, pool->esize - POISON_REDZONE_SIZE);
#endif
    last_tail->next = CHUNK_DATA(mpchunk);
    BLI_asan_poison(last_tail, pool->esize);
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_UNDEFINED(last_tail, pool->esize);
#endif
  }

  return curnode;
}

static void mempool_chunk_free(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
#ifdef WITH_ASAN
  BLI_asan_unpoison(mpchunk, sizeof(BLI_mempool_chunk) + pool->esize * pool->csize);
#else
  UNUSED_VARS(pool);
#endif
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_DEFINED(mpchunk, sizeof(BLI_mempool_chunk) + pool->esize * pool->csize);
#endif
  MEM_freeN(mpchunk);
}

static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
  BLI_mempool_chunk *mpchunk_next;

  for (; mpchunk; mpchunk = mpchunk_next) {
    mpchunk_next = mpchunk->next;
    mempool_chunk_free(mpchunk, pool);
  }
}

BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
{
  BLI_mempool *pool;
  BLI_freenode *last_tail = nullptr;
  uint i, maxchunks;

  /* allocate the pool structure */
  pool = MEM_callocN<BLI_mempool>("memory pool");

#ifdef WITH_ASAN
  BLI_mutex_init(&pool->mutex);
#endif

  /* set the elem size */
  esize = std::max(esize, uint(MEMPOOL_ELEM_SIZE_MIN));

  if (flag & BLI_MEMPOOL_ALLOW_ITER) {
    esize = std::max(esize, uint(sizeof(BLI_freenode)));
  }

  esize += POISON_REDZONE_SIZE;

  maxchunks = mempool_maxchunks(elem_num, pchunk);

  pool->chunks = nullptr;
  pool->chunk_tail = nullptr;
  pool->esize = esize;

  /* Optimize chunk size to powers of 2, accounting for slop-space. */
#ifdef USE_CHUNK_POW2
  {
    BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
    pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
  }
#endif
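
  /* Worked example (illustrative, not from the original source): with
   * `esize = 24` and `pchunk = 128`, `pchunk * esize = 3072` rounds up to
   * 4096; assuming CHUNK_OVERHEAD comes to 24 bytes, `pchunk` becomes
   * (4096 - 24) / 24 = 169, so the chunk header plus its data nearly fills
   * a 4096-byte allocation with minimal slop. */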

  pool->csize = esize * pchunk;

  /* Ensure this is a power of 2, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
  {
    uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
    BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
  }
#endif

  pool->pchunk = pchunk;
  pool->flag = flag;
  pool->free = nullptr; /* mempool_chunk_add assigns */
  pool->maxchunks = maxchunks;
  pool->totused = 0;

  if (elem_num) {
    /* Allocate the actual chunks. */
    for (i = 0; i < maxchunks; i++) {
      BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
      last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
    }
  }

#ifdef WITH_MEM_VALGRIND
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  return pool;
}
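
#if 0
/* Usage sketch (illustrative only, kept disabled like the reference code
 * further below): a pool of 512 24-byte elements, 128 per chunk, with
 * iteration enabled. All calls are the public API declared in BLI_mempool.h. */
static void mempool_usage_example()
{
  BLI_mempool *pool = BLI_mempool_create(24, 512, 128, BLI_MEMPOOL_ALLOW_ITER);

  void *a = BLI_mempool_alloc(pool);  /* Uninitialized element. */
  void *b = BLI_mempool_calloc(pool); /* Zero-initialized element. */

  BLI_mempool_free(pool, a); /* Pushes the element back onto the free list. */
  UNUSED_VARS(b);

  BLI_mempool_destroy(pool); /* Frees every chunk, used elements included. */
}
#endif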

void *BLI_mempool_alloc(BLI_mempool *pool)
{
  BLI_freenode *free_pop;

  if (UNLIKELY(pool->free == nullptr)) {
    /* Need to allocate a new chunk. */
    BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
    mempool_chunk_add(pool, mpchunk, nullptr);
  }

  free_pop = pool->free;

  BLI_asan_unpoison(free_pop, pool->esize - POISON_REDZONE_SIZE);
#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize - POISON_REDZONE_SIZE);
  /* Mark as defined, then undefine immediately before returning so:
   * - `free_pop->next` can be read without reading "undefined" memory.
   * - `freeword` can be set without causing the memory to be considered "defined".
   *
   * These could be handled on a more granular level - dealing with defining & undefining these
   * members explicitly but that requires more involved calls,
   * adding overhead for no real benefit. */
  VALGRIND_MAKE_MEM_DEFINED(free_pop, pool->esize - POISON_REDZONE_SIZE);
#endif

  BLI_assert(pool->chunk_tail->next == nullptr);

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
    free_pop->freeword = USEDWORD;
  }

  pool->free = free_pop->next;
  pool->totused++;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MAKE_MEM_UNDEFINED(free_pop, pool->esize - POISON_REDZONE_SIZE);
#endif

  return (void *)free_pop;
}

void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);

  memset(retval, 0, size_t(pool->esize) - POISON_REDZONE_SIZE);

  return retval;
}

void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
  BLI_freenode *newhead = static_cast<BLI_freenode *>(addr);

#ifndef NDEBUG
  {
    BLI_mempool_chunk *chunk;
    bool found = false;
    for (chunk = pool->chunks; chunk; chunk = chunk->next) {
      if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
        found = true;
        break;
      }
    }
    if (!found) {
      BLI_assert_msg(0, "Attempt to free data which is not in pool.\n");
    }
  }

  /* Enable for debugging. */
  if (UNLIKELY(mempool_debug_memset)) {
    memset(addr, 255, pool->esize - POISON_REDZONE_SIZE);
  }
#endif

  if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
    /* This will detect double frees. */
    BLI_assert(newhead->freeword != FREEWORD);
#endif
    newhead->freeword = FREEWORD;
  }

  newhead->next = pool->free;
  pool->free = newhead;

  BLI_asan_poison(newhead, pool->esize);

  pool->totused--;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_MEMPOOL_FREE(pool, addr);
#endif

  /* Nothing is in use; free all the chunks except the first. */
  if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
    const uint esize = pool->esize;
    BLI_freenode *curnode;
    uint j;
    BLI_mempool_chunk *first;

    first = pool->chunks;
    mempool_chunk_free_all(first->next, pool);
    first->next = nullptr;
    pool->chunk_tail = first;

    /* Temporary allocation so VALGRIND doesn't complain when setting freed blocks 'next'. */
#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif

    curnode = CHUNK_DATA(first);
    pool->free = curnode;

    j = pool->pchunk;
    while (j--) {
      BLI_asan_unpoison(curnode, pool->esize);
      BLI_freenode *next = curnode->next = NODE_STEP_NEXT(curnode);
      BLI_asan_poison(curnode, pool->esize);
      curnode = next;
    }

    BLI_asan_unpoison(curnode, pool->esize);
    BLI_freenode *prev = NODE_STEP_PREV(curnode);
    BLI_asan_poison(curnode, pool->esize);

    curnode = prev;

    BLI_asan_unpoison(curnode, pool->esize);
    curnode->next = nullptr; /* terminate the list */
    BLI_asan_poison(curnode, pool->esize);

#ifdef WITH_MEM_VALGRIND
    VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
  }
}

int BLI_mempool_len(const BLI_mempool *pool)
{
  int ret = int(pool->totused);

  return ret;
}

void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
  mempool_asan_lock(pool);

  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  if (index < pool->totused) {
    /* We could have some faster mem chunk stepping code inline. */
    BLI_mempool_iter iter;
    void *elem;
    BLI_mempool_iternew(pool, &iter);
    for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
      /* pass */
    }

    mempool_asan_unlock(pool);
    return elem;
  }

  mempool_asan_unlock(pool);
  return nullptr;
}

void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
  const uint esize = pool->esize - uint(POISON_REDZONE_SIZE);
  BLI_mempool_iter iter;
  const char *elem;
  char *p = static_cast<char *>(data);
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  mempool_asan_lock(pool);
  BLI_mempool_iternew(pool, &iter);
  while ((elem = static_cast<const char *>(BLI_mempool_iterstep(&iter)))) {
    memcpy(p, elem, size_t(esize));
    p = reinterpret_cast<char *>(NODE_STEP_NEXT(p));
  }
  mempool_asan_unlock(pool);
}

void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
  char *data = static_cast<char *>(
      MEM_malloc_arrayN(size_t(pool->totused), pool->esize, allocstr));
  BLI_mempool_as_array(pool, data);
  return data;
}

void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  iter->pool = pool;
  iter->curchunk = pool->chunks;
  iter->curindex = 0;
}
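
#if 0
/* Iteration sketch (illustrative only, kept disabled): requires a pool
 * created with BLI_MEMPOOL_ALLOW_ITER. BLI_mempool_iterstep() returns null
 * once every used element has been visited, skipping free-listed elements
 * via their FREEWORD marker. */
static void mempool_iter_example(BLI_mempool *pool)
{
  BLI_mempool_iter iter;
  BLI_mempool_iternew(pool, &iter);
  for (void *elem = BLI_mempool_iterstep(&iter); elem; elem = BLI_mempool_iterstep(&iter)) {
    /* Use `elem` here. */
  }
}
#endif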

static void mempool_threadsafe_iternew(BLI_mempool *pool, BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iternew(pool, &ts_iter->iter);
  ts_iter->curchunk_threaded_shared = nullptr;
}

ParallelMempoolTaskData *mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t iter_num)
{
  BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);

  ParallelMempoolTaskData *iter_arr = MEM_calloc_arrayN<ParallelMempoolTaskData>(iter_num,
                                                                                 __func__);
  BLI_mempool_chunk **curchunk_threaded_shared = MEM_callocN<BLI_mempool_chunk *>(__func__);

  mempool_threadsafe_iternew(pool, &iter_arr->ts_iter);

  *curchunk_threaded_shared = iter_arr->ts_iter.iter.curchunk;
  iter_arr->ts_iter.curchunk_threaded_shared = curchunk_threaded_shared;
  for (size_t i = 1; i < iter_num; i++) {
    iter_arr[i].ts_iter = iter_arr[0].ts_iter;
    *curchunk_threaded_shared = iter_arr[i].ts_iter.iter.curchunk =
        ((*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : nullptr);
  }

  return iter_arr;
}
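
/* Usage note (illustrative, not from the original source): each worker thread
 * owns one ParallelMempoolTaskData slot returned here and calls
 * mempool_iter_threadsafe_step() on its own `ts_iter`; the shared
 * `curchunk_threaded_shared` pointer is advanced with atomic_cas_ptr() so
 * each chunk is claimed by exactly one thread. */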

void mempool_iter_threadsafe_destroy(ParallelMempoolTaskData *iter_arr)
{
  BLI_assert(iter_arr->ts_iter.curchunk_threaded_shared != nullptr);

  MEM_freeN(iter_arr->ts_iter.curchunk_threaded_shared);
  MEM_freeN(iter_arr);
}

#if 0
/* unoptimized, more readable */

static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
  void *ret = nullptr;

  if (iter->curchunk == nullptr || !iter->pool->totused) {
    return ret;
  }

  ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);

  iter->curindex++;

  if (iter->curindex == iter->pool->pchunk) {
    iter->curindex = 0;
    iter->curchunk = iter->curchunk->next;
  }

  return ret;
}

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  BLI_freenode *ret;

  do {
    ret = bli_mempool_iternext(iter);
  } while (ret && ret->freeword == FREEWORD);

  return ret;
}

#else /* Optimized version of code above. */

void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
  if (UNLIKELY(iter->curchunk == nullptr)) {
    return nullptr;
  }

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
# ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize - POISON_REDZONE_SIZE);
# endif

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;
      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == nullptr)) {
        BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
# ifdef WITH_MEM_VALGRIND
        VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize - POISON_REDZONE_SIZE);
# endif
        void *ret2 = (ret->freeword == FREEWORD) ? nullptr : ret;

        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, iter->pool->esize);
# ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
# endif
        }

        return ret2;
      }
      curnode = CHUNK_DATA(iter->curchunk);
    }
  } while (ret->freeword == FREEWORD);

  return ret;
}

void *mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
{
  BLI_mempool_iter *iter = &ts_iter->iter;
  if (UNLIKELY(iter->curchunk == nullptr)) {
    return nullptr;
  }

  mempool_asan_lock(iter->pool);

  const uint esize = iter->pool->esize;
  BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
  BLI_freenode *ret;
  do {
    ret = curnode;

    BLI_asan_unpoison(ret, esize - POISON_REDZONE_SIZE);
# ifdef WITH_MEM_VALGRIND
    VALGRIND_MAKE_MEM_DEFINED(ret, iter->pool->esize);
# endif

    if (++iter->curindex != iter->pool->pchunk) {
      curnode = POINTER_OFFSET(curnode, esize);
    }
    else {
      iter->curindex = 0;

      /* Begin unique to the `threadsafe` version of this function. */
      for (iter->curchunk = *ts_iter->curchunk_threaded_shared;
           (iter->curchunk != nullptr) &&
           (atomic_cas_ptr((void **)ts_iter->curchunk_threaded_shared,
                           iter->curchunk,
                           iter->curchunk->next) != iter->curchunk);
           iter->curchunk = *ts_iter->curchunk_threaded_shared)
      {
        /* pass. */
      }
      if (UNLIKELY(iter->curchunk == nullptr)) {
        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, esize);
# ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
# endif
          mempool_asan_unlock(iter->pool);
          return nullptr;
        }
        mempool_asan_unlock(iter->pool);
        return ret;
      }
      /* End `threadsafe` exception. */

      iter->curchunk = iter->curchunk->next;
      if (UNLIKELY(iter->curchunk == nullptr)) {
        if (ret->freeword == FREEWORD) {
          BLI_asan_poison(ret, iter->pool->esize);
# ifdef WITH_MEM_VALGRIND
          VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
# endif
          mempool_asan_unlock(iter->pool);
          return nullptr;
        }
        mempool_asan_unlock(iter->pool);
        return ret;
      }

      curnode = CHUNK_DATA(iter->curchunk);
    }

    if (ret->freeword == FREEWORD) {
      BLI_asan_poison(ret, iter->pool->esize);
# ifdef WITH_MEM_VALGRIND
      VALGRIND_MAKE_MEM_UNDEFINED(ret, iter->pool->esize);
# endif
    }
    else {
      break;
    }
  } while (true);

  mempool_asan_unlock(iter->pool);
  return ret;
}

#endif

void BLI_mempool_clear_ex(BLI_mempool *pool, const int elem_num_reserve)
{
  BLI_mempool_chunk *mpchunk;
  BLI_mempool_chunk *mpchunk_next;
  uint maxchunks;

  BLI_mempool_chunk *chunks_temp;
  BLI_freenode *last_tail = nullptr;

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
  VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif

  if (elem_num_reserve == -1) {
    maxchunks = pool->maxchunks;
  }
  else {
    maxchunks = mempool_maxchunks(uint(elem_num_reserve), pool->pchunk);
  }

  /* Free all after 'pool->maxchunks'. */
  mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
  if (mpchunk && mpchunk->next) {
    /* terminate */
    mpchunk_next = mpchunk->next;
    mpchunk->next = nullptr;
    mpchunk = mpchunk_next;

    do {
      mpchunk_next = mpchunk->next;
      mempool_chunk_free(mpchunk, pool);
    } while ((mpchunk = mpchunk_next));
  }

  /* re-initialize */
  pool->free = nullptr;
  pool->totused = 0;
  chunks_temp = pool->chunks;
  pool->chunks = nullptr;
  pool->chunk_tail = nullptr;

  while ((mpchunk = chunks_temp)) {
    chunks_temp = mpchunk->next;
    last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
  }
}
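
/* Illustrative note (not from the original source): BLI_mempool_clear_ex(pool, -1)
 * keeps enough chunks for the reserve the pool was created with, while e.g.
 * BLI_mempool_clear_ex(pool, 1024) keeps enough chunks for 1024 elements,
 * avoiding chunk re-allocation when the pool refills to a known size. */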

void BLI_mempool_clear(BLI_mempool *pool)
{
  BLI_mempool_clear_ex(pool, -1);
}

void BLI_mempool_destroy(BLI_mempool *pool)
{
  mempool_chunk_free_all(pool->chunks, pool);

#ifdef WITH_MEM_VALGRIND
  VALGRIND_DESTROY_MEMPOOL(pool);
#endif

  MEM_freeN(pool);
}

#ifndef NDEBUG
void BLI_mempool_set_memory_debug()
{
  mempool_debug_memset = true;
}
#endif