/* Allocate `_size` bytes: on the stack via alloca() for small requests
 * (<= 8192 bytes), on the heap via MEM_mallocN() otherwise.
 * Must be paired with MALLOCA_FREE() called with the same size, so the
 * heap branch is released (the alloca() branch needs no explicit free).
 *
 * NOTE: the expansion is wrapped in one outer pair of parentheses so the
 * macro stays well-formed inside a larger expression — without them a
 * cast such as `(char *)MALLOCA(n)` would bind to the ternary's condition
 * instead of its result, since a cast has higher precedence than `?:`. */
#define MALLOCA(_size) (((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__))
39 #define MALLOCA_FREE(_mem, _size) \
40 if (((_mem) != NULL) && ((_size) > 8192)) { \
75 const int num_tasks_factor =
max_ii(1, num_tasks >> 3);
79 chunk_size = 32 * num_tasks_factor;
83 if (tot_items > 0 && tot_items <
max_ii(256, chunk_size * 2)) {
84 chunk_size = tot_items;
89 *r_chunk_size = chunk_size;
105 void *userdata_chunk)
111 void **current_chunk_items;
112 int *current_chunk_indices;
113 int current_chunk_size;
115 const size_t items_size =
sizeof(*current_chunk_items) * (size_t)
state->iter_shared.chunk_size;
116 const size_t indices_size =
sizeof(*current_chunk_indices) *
117 (size_t)
state->iter_shared.chunk_size;
119 current_chunk_items =
MALLOCA(items_size);
120 current_chunk_indices =
MALLOCA(indices_size);
121 current_chunk_size = 0;
123 for (
bool do_abort =
false; !do_abort;) {
124 if (
state->iter_shared.spin_lock !=
NULL) {
129 int index =
state->iter_shared.next_index;
130 void *item =
state->iter_shared.next_item;
134 for (i = 0; i <
state->iter_shared.chunk_size && !
state->iter_shared.is_finished; i++) {
135 current_chunk_indices[i] = index;
136 current_chunk_items[i] = item;
137 state->iter_func(
state->userdata, &tls, &item, &index, &
state->iter_shared.is_finished);
141 state->iter_shared.next_index = index;
142 state->iter_shared.next_item = item;
143 current_chunk_size = i;
145 do_abort =
state->iter_shared.is_finished;
147 if (
state->iter_shared.spin_lock !=
NULL) {
151 for (i = 0; i < current_chunk_size; ++i) {
152 state->func(
state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
173 void *userdata_chunk_local =
NULL;
174 const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk !=
NULL);
175 if (use_userdata_chunk) {
176 userdata_chunk_local =
MALLOCA(userdata_chunk_size);
177 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
197 settings,
state->tot_items, num_threads, &
state->iter_shared.chunk_size);
204 const int chunk_size =
state->iter_shared.chunk_size;
205 const int tot_items =
state->tot_items;
206 const size_t num_tasks = tot_items >= 0 ?
207 (size_t)
min_ii(num_threads,
state->tot_items / chunk_size) :
211 if (num_tasks == 1) {
218 state->iter_shared.spin_lock = &spin_lock;
222 void *userdata_chunk_local =
NULL;
223 void *userdata_chunk_array =
NULL;
224 const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk !=
NULL);
228 if (use_userdata_chunk) {
229 userdata_chunk_array =
MALLOCA(userdata_chunk_size * num_tasks);
232 for (
size_t i = 0; i < num_tasks; i++) {
233 if (use_userdata_chunk) {
234 userdata_chunk_local = (
char *)userdata_chunk_array + (userdata_chunk_size * i);
235 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
245 for (
size_t i = 0; i < num_tasks; i++) {
246 userdata_chunk_local = (
char *)userdata_chunk_array + (userdata_chunk_size * i);
248 settings->
func_reduce(
state->userdata, userdata_chunk, userdata_chunk_local);
254 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
279 const int init_index,
286 state.tot_items = tot_items;
287 state.iter_shared.next_index = init_index;
288 state.iter_shared.next_item = init_item;
289 state.iter_shared.is_finished =
false;
290 state.userdata = userdata;
291 state.iter_func = iter_func;
304 Link *link = *r_next_item;
309 *r_next_item = link->
next;
336 state.iter_shared.next_index = 0;
337 state.iter_shared.next_item = listbase->
first;
338 state.iter_shared.is_finished =
false;
339 state.userdata = userdata;
380 const bool use_threading)
384 int i, num_threads, num_tasks;
390 if (!use_threading) {
396 func(userdata, item);
408 num_tasks = num_threads + 2;
410 state.userdata = userdata;
416 for (i = 0; i < num_tasks; i++) {
BLI_INLINE bool BLI_listbase_is_empty(const struct ListBase *lb)
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
MINLINE int min_ii(int a, int b)
MINLINE int max_ii(int a, int b)
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter) ATTR_NONNULL()
BLI_mempool_iter * BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
void * BLI_mempool_iterstep(BLI_mempool_iter *iter) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL()
int BLI_mempool_len(BLI_mempool *pool) ATTR_NONNULL(1)
void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr) ATTR_NONNULL()
int BLI_task_scheduler_num_threads(void)
struct MempoolIterData MempoolIterData
void * BLI_task_pool_user_data(TaskPool *pool)
void BLI_task_pool_work_and_wait(TaskPool *pool)
void(* TaskParallelIteratorIterFunc)(void *__restrict userdata, const TaskParallelTLS *__restrict tls, void **r_next_item, int *r_next_index, bool *r_do_abort)
void(* TaskParallelMempoolFunc)(void *userdata, MempoolIterData *iter)
TaskPool * BLI_task_pool_create(void *userdata, TaskPriority priority)
void(* TaskParallelIteratorFunc)(void *__restrict userdata, void *item, int index, const TaskParallelTLS *__restrict tls)
void BLI_task_pool_free(TaskPool *pool)
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
pthread_spinlock_t SpinLock
void BLI_spin_init(SpinLock *spin)
void BLI_spin_unlock(SpinLock *spin)
void BLI_spin_lock(SpinLock *spin)
void BLI_spin_end(SpinLock *spin)
These structs are the foundation for all linked lists in the library system.
Read Guarded memory(de)allocation.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
TaskParallelMempoolFunc func
TaskParallelIteratorFunc func
TaskParallelIteratorStateShared iter_shared
TaskParallelIteratorIterFunc iter_func
TaskParallelReduceFunc func_reduce
TaskParallelFreeFunc func_free
size_t userdata_chunk_size
static void task_parallel_listbase_get(void *__restrict UNUSED(userdata), const TaskParallelTLS *__restrict UNUSED(tls), void **r_next_item, int *r_next_index, bool *r_do_abort)
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings, const int tot_items, int num_tasks, int *r_chunk_size)
#define MALLOCA_FREE(_mem, _size)
static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
static void task_parallel_iterator_do(const TaskParallelSettings *settings, TaskParallelIteratorState *state)
static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
void BLI_task_parallel_listbase(ListBase *listbase, void *userdata, TaskParallelIteratorFunc func, const TaskParallelSettings *settings)
struct ParallelMempoolState ParallelMempoolState
static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings, TaskParallelIteratorState *state)
void BLI_task_parallel_iterator(void *userdata, TaskParallelIteratorIterFunc iter_func, void *init_item, const int init_index, const int tot_items, TaskParallelIteratorFunc func, const TaskParallelSettings *settings)
struct TaskParallelIteratorState TaskParallelIteratorState
void BLI_task_parallel_mempool(BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func, const bool use_threading)
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state, void *userdata_chunk)