45 #ifdef WITH_MEM_VALGRIND
46 # include "valgrind/memcheck.h"
/* Pack 4 (or 8) byte characters into a single int (or int64_t) "magic word".
 * This variant places the FIRST argument in the most-significant byte.
 * NOTE(review): presumably the big-endian branch of a byte-order #if/#else pair
 * -- the mirrored variant below reverses the argument order; confirm against
 * the surrounding (not visible here) preprocessor conditionals.
 * Arguments are fully parenthesized; the int64_t casts keep the 32..56-bit
 * shifts within the promoted type's width (avoiding UB on plain int). */
52 # define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
53 # define MAKE_ID_8(a, b, c, d, e, f, g, h) \
54 ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
55 (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
/* Byte-swapped counterpart of the MAKE_ID/MAKE_ID_8 pair above: the LAST
 * argument lands in the most-significant byte, so the in-memory byte sequence
 * of the resulting word is the same on this byte order as the other variant
 * produces on the opposite one.
 * NOTE(review): presumably the little-endian branch of the same #if/#else
 * pair -- confirm against the surrounding (not visible) conditionals. */
58 # define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
59 # define MAKE_ID_8(a, b, c, d, e, f, g, h) \
60 ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
61 (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
71 ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
72 MAKE_ID('e', 'f', 'f', 'e'))
/* Magic word spelling "used". NOTE(review): appears to be the counterpart of a
 * FREEWORD marker (partially visible above, built from "free"/"effe") used to
 * tag pool elements' state for debug checks -- confirm against the full file. */
77 #define USEDWORD MAKE_ID('u', 's', 'e', 'd')
/* Feature toggle: when defined, chunk sizing is rounded via power_of_2_max_u()
 * (see the #ifdef USE_CHUNK_POW2 sites further down). Defined unconditionally
 * here, so the #ifdef blocks below are compiled in. */
83 #define USE_CHUNK_POW2
/* Smallest supported element size: two pointers' worth, enough to hold the
 * free-list link (BLI_freenode) plus the debug marker word. */
140 #define MEMPOOL_ELEM_SIZE_MIN (sizeof(void *) * 2)

/* First element slot of a chunk: the payload starts immediately after the
 * BLI_mempool_chunk header ((chunk) + 1). CHECK_TYPE_INLINE is a compile-time
 * type assertion only; the comma operator discards its result. */
142 #define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))

/* Step to the next/previous element slot. NOTE(review): these expand a bare
 * `esize` identifier, so they are only usable inside functions that declare a
 * local `esize` (the element stride) -- intentional capture, not a bug. */
144 #define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
145 #define NODE_STEP_PREV(node) ((void *)((char *)(node)-esize))

/* Per-chunk bookkeeping bytes: allocator overhead plus the chunk header;
 * used when rounding the usable chunk size. */
148 #define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
150 #ifdef USE_CHUNK_POW2
165 while (index-- && head) {
179 return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
205 if (
pool->chunk_tail) {
206 pool->chunk_tail->next = mpchunk;
210 pool->chunks = mpchunk;
214 pool->chunk_tail = mpchunk;
217 pool->free = curnode;
226 curnode = curnode->
next;
232 curnode = curnode->
next;
262 for (; mpchunk; mpchunk = mpchunk_next) {
263 mpchunk_next = mpchunk->
next;
293 #ifdef USE_CHUNK_POW2
300 pool->csize = esize * pchunk;
303 #if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
310 pool->pchunk = pchunk;
313 pool->maxchunks = maxchunks;
321 for (i = 0; i < maxchunks; i++) {
327 #ifdef WITH_MEM_VALGRIND
344 free_pop =
pool->free;
355 #ifdef WITH_MEM_VALGRIND
359 return (
void *)free_pop;
365 memset(retval, 0, (
size_t)
pool->esize);
382 for (chunk =
pool->chunks; chunk; chunk = chunk->
next) {
389 BLI_assert(!
"Attempt to free data which is not in pool.\n");
395 memset(addr, 255,
pool->esize);
408 pool->free = newhead;
412 #ifdef WITH_MEM_VALGRIND
413 VALGRIND_MEMPOOL_FREE(
pool, addr);
423 first =
pool->chunks;
426 pool->chunk_tail = first;
433 #ifdef WITH_MEM_VALGRIND
438 pool->free = curnode;
443 curnode = curnode->
next;
448 #ifdef WITH_MEM_VALGRIND
456 return (
int)
pool->totused;
463 if (index < pool->totused) {
514 char *elem, *p =
data;
518 memcpy(p, elem, (
size_t)esize);
570 *curchunk_threaded_shared = iter_arr->
curchunk;
573 for (
size_t i = 1; i < num_iter; i++) {
574 iter_arr[i] = iter_arr[0];
575 *curchunk_threaded_shared = iter_arr[i].
curchunk = ((*curchunk_threaded_shared) ?
576 (*curchunk_threaded_shared)->next :
632 ret = bli_mempool_iternext(iter);
703 #ifdef WITH_MEM_VALGRIND
708 if (totelem_reserve == -1) {
709 maxchunks =
pool->maxchunks;
717 if (mpchunk && mpchunk->
next) {
719 mpchunk_next = mpchunk->
next;
721 mpchunk = mpchunk_next;
724 mpchunk_next = mpchunk->
next;
726 }
while ((mpchunk = mpchunk_next));
736 chunks_temp =
pool->chunks;
740 while ((mpchunk = chunks_temp)) {
741 chunks_temp = mpchunk->
next;
761 #ifdef WITH_MEM_VALGRIND
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)
#define VALGRIND_DESTROY_MEMPOOL(pool)
#define MEMPOOL_ELEM_SIZE_MIN
#define NODE_STEP_PREV(node)
static bool mempool_debug_memset
#define CHUNK_DATA(chunk)
static BLI_freenode * mempool_chunk_add(BLI_mempool *pool, BLI_mempool_chunk *mpchunk, BLI_freenode *last_tail)
BLI_INLINE BLI_mempool_chunk * mempool_chunk_find(BLI_mempool_chunk *head, uint index)
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
int BLI_mempool_len(BLI_mempool *pool)
static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
void * BLI_mempool_alloc(BLI_mempool *pool)
BLI_mempool_iter * BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
void BLI_mempool_free(BLI_mempool *pool, void *addr)
void BLI_mempool_clear(BLI_mempool *pool)
BLI_INLINE uint mempool_maxchunks(const uint totelem, const uint pchunk)
void BLI_mempool_set_memory_debug(void)
void * BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
void * BLI_mempool_calloc(BLI_mempool *pool)
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
struct BLI_freenode BLI_freenode
void BLI_mempool_destroy(BLI_mempool *pool)
struct BLI_mempool_chunk BLI_mempool_chunk
BLI_mempool * BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag)
void BLI_mempool_as_table(BLI_mempool *pool, void **data)
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
void * BLI_mempool_iterstep(BLI_mempool_iter *iter)
void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
#define NODE_STEP_NEXT(node)
void ** BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
void * BLI_mempool_findelem(BLI_mempool *pool, uint index)
static uint power_of_2_max_u(uint x)
static BLI_mempool_chunk * mempool_chunk_alloc(BLI_mempool *pool)
static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
Strict compiler flags for areas of code we want to ensure don't do conversions without us knowing abo...
#define ARRAY_HAS_ITEM(arr_item, arr_start, arr_len)
#define POINTER_OFFSET(v, ofs)
Read Guarded memory(de)allocation.
#define MEM_SIZE_OVERHEAD
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE void * atomic_cas_ptr(void **v, void *old, void *_new)
void *(* MEM_malloc_arrayN)(size_t len, size_t size, const char *str)
void(* MEM_freeN)(void *vmemh)
void *(* MEM_mallocN)(size_t len, const char *str)
struct BLI_freenode * next
struct BLI_mempool_chunk * next
struct BLI_mempool_chunk * curchunk
struct BLI_mempool_chunk ** curchunk_threaded_shared
BLI_mempool_chunk * chunks
BLI_mempool_chunk * chunk_tail