Blender  V2.93
threads.cc
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * The Original Code is Copyright (C) 2006 Blender Foundation
17  * All rights reserved.
18  */
19 
24 #include <cerrno>
25 #include <cstdlib>
26 #include <cstring>
27 
28 #include "MEM_guardedalloc.h"
29 
30 #include "BLI_gsqueue.h"
31 #include "BLI_listbase.h"
32 #include "BLI_system.h"
33 #include "BLI_task.h"
34 #include "BLI_threads.h"
35 
36 #include "PIL_time.h"
37 
38 /* for checking system threads - BLI_system_thread_count */
39 #ifdef WIN32
40 # include <sys/timeb.h>
41 # include <windows.h>
42 #elif defined(__APPLE__)
43 # include <sys/sysctl.h>
44 # include <sys/types.h>
45 #else
46 # include <sys/time.h>
47 # include <unistd.h>
48 #endif
49 
50 #ifdef WITH_TBB
51 # include <tbb/spin_mutex.h>
52 #endif
53 
54 #include "atomic_ops.h"
55 #include "numaapi.h"
56 
57 #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && \
58  !defined(__clang__)
59 # define USE_APPLE_OMP_FIX
60 #endif
61 
62 #ifdef USE_APPLE_OMP_FIX
63 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
64 extern pthread_key_t gomp_tls_key;
65 static void *thread_tls_data;
66 #endif
67 
118 static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
119 static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
120 static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
121 static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
122 static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
123 static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
124 static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
125 static pthread_mutex_t _fftw_lock = PTHREAD_MUTEX_INITIALIZER;
126 static pthread_mutex_t _view3d_lock = PTHREAD_MUTEX_INITIALIZER;
127 static pthread_t mainid;
128 static bool is_numa_available = false;
129 static unsigned int thread_levels = 0; /* threads can be invoked inside threads */
130 static int num_threads_override = 0;
131 
132 /* just a max for security reasons */
133 #define RE_MAX_THREAD BLENDER_MAX_THREADS
134 
/* One pre-allocated worker entry in a thread pool list (see BLI_threadpool_init). */
struct ThreadSlot {
  struct ThreadSlot *next, *prev; /* ListBase links. */
  void *(*do_thread)(void *);     /* Worker entry point shared by all slots of a pool. */
  void *callerdata;               /* Argument passed to do_thread; set on insert. */
  pthread_t pthread;              /* Valid only while avail == 0. */
  int avail;                      /* 1 = free slot, 0 = thread running (or joinable). */
};
142 
144 {
145  mainid = pthread_self();
147  is_numa_available = true;
148  }
149 }
150 
/* Tear-down counterpart of BLI_threadapi_init; currently nothing to do. */
void BLI_threadapi_exit(void)
{
}
154 
155 /* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
156  * problem otherwise: scene render will kill of the mutex!
157  */
158 
159 void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot)
160 {
161  int a;
162 
163  if (threadbase != nullptr && tot > 0) {
164  BLI_listbase_clear(threadbase);
165 
166  if (tot > RE_MAX_THREAD) {
167  tot = RE_MAX_THREAD;
168  }
169  else if (tot < 1) {
170  tot = 1;
171  }
172 
173  for (a = 0; a < tot; a++) {
174  ThreadSlot *tslot = static_cast<ThreadSlot *>(MEM_callocN(sizeof(ThreadSlot), "threadslot"));
175  BLI_addtail(threadbase, tslot);
176  tslot->do_thread = do_thread;
177  tslot->avail = 1;
178  }
179  }
180 
181  unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
182  if (level == 0) {
183 #ifdef USE_APPLE_OMP_FIX
184  /* Workaround for Apple gcc 4.2.1 OMP vs background thread bug,
185  * we copy GOMP thread local storage pointer to setting it again
186  * inside the thread that we start. */
187  thread_tls_data = pthread_getspecific(gomp_tls_key);
188 #endif
189  }
190 }
191 
192 /* amount of available threads */
194 {
195  int counter = 0;
196 
197  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
198  if (tslot->avail) {
199  counter++;
200  }
201  }
202 
203  return counter;
204 }
205 
206 /* returns thread number, for sample patterns or threadsafe tables */
208 {
209  int counter = 0;
210 
211  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
212  if (tslot->avail) {
213  return counter;
214  }
215  ++counter;
216  }
217 
218  return 0;
219 }
220 
221 static void *tslot_thread_start(void *tslot_p)
222 {
223  ThreadSlot *tslot = (ThreadSlot *)tslot_p;
224 
225 #ifdef USE_APPLE_OMP_FIX
226  /* Workaround for Apple gcc 4.2.1 OMP vs background thread bug,
227  * set GOMP thread local storage pointer which was copied beforehand */
228  pthread_setspecific(gomp_tls_key, thread_tls_data);
229 #endif
230 
231  return tslot->do_thread(tslot->callerdata);
232 }
233 
235 {
236  return pthread_equal(pthread_self(), mainid);
237 }
238 
239 void BLI_threadpool_insert(ListBase *threadbase, void *callerdata)
240 {
241  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
242  if (tslot->avail) {
243  tslot->avail = 0;
244  tslot->callerdata = callerdata;
245  pthread_create(&tslot->pthread, nullptr, tslot_thread_start, tslot);
246  return;
247  }
248  }
249  printf("ERROR: could not insert thread slot\n");
250 }
251 
252 void BLI_threadpool_remove(ListBase *threadbase, void *callerdata)
253 {
254  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
255  if (tslot->callerdata == callerdata) {
256  pthread_join(tslot->pthread, nullptr);
257  tslot->callerdata = nullptr;
258  tslot->avail = 1;
259  }
260  }
261 }
262 
263 void BLI_threadpool_remove_index(ListBase *threadbase, int index)
264 {
265  int counter = 0;
266 
267  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
268  if (counter == index && tslot->avail == 0) {
269  pthread_join(tslot->pthread, nullptr);
270  tslot->callerdata = nullptr;
271  tslot->avail = 1;
272  break;
273  }
274  ++counter;
275  }
276 }
277 
279 {
280  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
281  if (tslot->avail == 0) {
282  pthread_join(tslot->pthread, nullptr);
283  tslot->callerdata = nullptr;
284  tslot->avail = 1;
285  }
286  }
287 }
288 
289 void BLI_threadpool_end(ListBase *threadbase)
290 {
291 
292  /* Only needed if there's actually some stuff to end
293  * this way we don't end up decrementing thread_levels on an empty `threadbase`. */
294  if (threadbase == nullptr || BLI_listbase_is_empty(threadbase)) {
295  return;
296  }
297 
298  LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) {
299  if (tslot->avail == 0) {
300  pthread_join(tslot->pthread, nullptr);
301  }
302  }
303  BLI_freelistN(threadbase);
304 }
305 
306 /* System Information */
307 
308 /* how many threads are native on this system? */
310 {
311  static int t = -1;
312 
313  if (num_threads_override != 0) {
314  return num_threads_override;
315  }
316  if (LIKELY(t != -1)) {
317  return t;
318  }
319 
320  {
321 #ifdef WIN32
322  SYSTEM_INFO info;
323  GetSystemInfo(&info);
324  t = (int)info.dwNumberOfProcessors;
325 #else
326 # ifdef __APPLE__
327  int mib[2];
328  size_t len;
329 
330  mib[0] = CTL_HW;
331  mib[1] = HW_NCPU;
332  len = sizeof(t);
333  sysctl(mib, 2, &t, &len, nullptr, 0);
334 # else
335  t = (int)sysconf(_SC_NPROCESSORS_ONLN);
336 # endif
337 #endif
338  }
339 
340  CLAMP(t, 1, RE_MAX_THREAD);
341 
342  return t;
343 }
344 
346 {
347  num_threads_override = num;
348 }
349 
351 {
352  return num_threads_override;
353 }
354 
355 /* Global Mutex Locks */
356 
358 {
359  switch (type) {
360  case LOCK_IMAGE:
361  return &_image_lock;
362  case LOCK_DRAW_IMAGE:
363  return &_image_draw_lock;
364  case LOCK_VIEWER:
365  return &_viewer_lock;
366  case LOCK_CUSTOM1:
367  return &_custom1_lock;
368  case LOCK_NODES:
369  return &_nodes_lock;
370  case LOCK_MOVIECLIP:
371  return &_movieclip_lock;
372  case LOCK_COLORMANAGE:
373  return &_colormanage_lock;
374  case LOCK_FFTW:
375  return &_fftw_lock;
376  case LOCK_VIEW3D:
377  return &_view3d_lock;
378  default:
379  BLI_assert(0);
380  return nullptr;
381  }
382 }
383 
385 {
386  pthread_mutex_lock(global_mutex_from_type(type));
387 }
388 
390 {
391  pthread_mutex_unlock(global_mutex_from_type(type));
392 }
393 
394 /* Mutex Locks */
395 
397 {
398  pthread_mutex_init(mutex, nullptr);
399 }
400 
402 {
403  pthread_mutex_lock(mutex);
404 }
405 
407 {
408  pthread_mutex_unlock(mutex);
409 }
410 
412 {
413  return (pthread_mutex_trylock(mutex) == 0);
414 }
415 
417 {
418  pthread_mutex_destroy(mutex);
419 }
420 
422 {
423  ThreadMutex *mutex = static_cast<ThreadMutex *>(MEM_callocN(sizeof(ThreadMutex), "ThreadMutex"));
425  return mutex;
426 }
427 
429 {
431  MEM_freeN(mutex);
432 }
433 
434 /* Spin Locks */
435 
#ifdef WITH_TBB
/* Reinterpret the opaque SpinLock storage as the TBB spin mutex it holds.
 * The static_asserts guarantee the storage is large enough and suitably
 * aligned, so the cast is safe. */
static tbb::spin_mutex *tbb_spin_mutex_cast(SpinLock *spin)
{
  static_assert(sizeof(SpinLock) >= sizeof(tbb::spin_mutex),
                "SpinLock must match tbb::spin_mutex");
  static_assert(alignof(SpinLock) % alignof(tbb::spin_mutex) == 0,
                "SpinLock must be aligned same as tbb::spin_mutex");
  return reinterpret_cast<tbb::spin_mutex *>(spin);
}
#endif
446 
448 {
449 #ifdef WITH_TBB
450  tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
451  new (spin_mutex) tbb::spin_mutex();
452 #elif defined(__APPLE__)
454 #elif defined(_MSC_VER)
455  *spin = 0;
456 #else
457  pthread_spin_init(spin, 0);
458 #endif
459 }
460 
462 {
463 #ifdef WITH_TBB
464  tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
465  spin_mutex->lock();
466 #elif defined(__APPLE__)
468 #elif defined(_MSC_VER)
469  while (InterlockedExchangeAcquire(spin, 1)) {
470  while (*spin) {
471  /* Spin-lock hint for processors with hyper-threading. */
472  YieldProcessor();
473  }
474  }
475 #else
476  pthread_spin_lock(spin);
477 #endif
478 }
479 
481 {
482 #ifdef WITH_TBB
483  tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
484  spin_mutex->unlock();
485 #elif defined(__APPLE__)
487 #elif defined(_MSC_VER)
488  _ReadWriteBarrier();
489  *spin = 0;
490 #else
491  pthread_spin_unlock(spin);
492 #endif
493 }
494 
496 {
497 #ifdef WITH_TBB
498  tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin);
499  spin_mutex->~spin_mutex();
500 #elif defined(__APPLE__)
502 #elif defined(_MSC_VER)
503  /* Nothing to do, spin is a simple integer type. */
504 #else
505  pthread_spin_destroy(spin);
506 #endif
507 }
508 
509 /* Read/Write Mutex Lock */
510 
512 {
513  pthread_rwlock_init(mutex, nullptr);
514 }
515 
517 {
518  if (mode == THREAD_LOCK_READ) {
519  pthread_rwlock_rdlock(mutex);
520  }
521  else {
522  pthread_rwlock_wrlock(mutex);
523  }
524 }
525 
527 {
528  pthread_rwlock_unlock(mutex);
529 }
530 
532 {
533  pthread_rwlock_destroy(mutex);
534 }
535 
537 {
538  ThreadRWMutex *mutex = static_cast<ThreadRWMutex *>(
539  MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex"));
541  return mutex;
542 }
543 
545 {
547  MEM_freeN(mutex);
548 }
549 
550 /* Ticket Mutex Lock */
551 
/* FIFO ("ticket") mutex: waiters take a ticket (queue_tail) and are served
 * strictly in ticket order as queue_head advances, guaranteeing fairness. */
struct TicketMutex {
  pthread_cond_t cond;   /* Signaled whenever queue_head advances. */
  pthread_mutex_t mutex; /* Protects the two counters below. */
  unsigned int queue_head, queue_tail;
};
557 
559 {
560  TicketMutex *ticket = static_cast<TicketMutex *>(
561  MEM_callocN(sizeof(TicketMutex), "TicketMutex"));
562 
563  pthread_cond_init(&ticket->cond, nullptr);
564  pthread_mutex_init(&ticket->mutex, nullptr);
565 
566  return ticket;
567 }
568 
570 {
571  pthread_mutex_destroy(&ticket->mutex);
572  pthread_cond_destroy(&ticket->cond);
573  MEM_freeN(ticket);
574 }
575 
577 {
578  unsigned int queue_me;
579 
580  pthread_mutex_lock(&ticket->mutex);
581  queue_me = ticket->queue_tail++;
582 
583  while (queue_me != ticket->queue_head) {
584  pthread_cond_wait(&ticket->cond, &ticket->mutex);
585  }
586 
587  pthread_mutex_unlock(&ticket->mutex);
588 }
589 
591 {
592  pthread_mutex_lock(&ticket->mutex);
593  ticket->queue_head++;
594  pthread_cond_broadcast(&ticket->cond);
595  pthread_mutex_unlock(&ticket->mutex);
596 }
597 
598 /* ************************************************ */
599 
600 /* Condition */
601 
603 {
604  pthread_cond_init(cond, nullptr);
605 }
606 
608 {
609  pthread_cond_wait(cond, mutex);
610 }
611 
613 {
614  pthread_cond_wait(cond, global_mutex_from_type(type));
615 }
616 
618 {
619  pthread_cond_signal(cond);
620 }
621 
623 {
624  pthread_cond_broadcast(cond);
625 }
626 
628 {
629  pthread_cond_destroy(cond);
630 }
631 
632 /* ************************************************ */
633 
634 struct ThreadQueue {
636  pthread_mutex_t mutex;
637  pthread_cond_t push_cond;
638  pthread_cond_t finish_cond;
639  volatile int nowait;
640  volatile int canceled;
641 };
642 
644 {
646 
647  queue = static_cast<ThreadQueue *>(MEM_callocN(sizeof(ThreadQueue), "ThreadQueue"));
648  queue->queue = BLI_gsqueue_new(sizeof(void *));
649 
650  pthread_mutex_init(&queue->mutex, nullptr);
651  pthread_cond_init(&queue->push_cond, nullptr);
652  pthread_cond_init(&queue->finish_cond, nullptr);
653 
654  return queue;
655 }
656 
658 {
659  /* destroy everything, assumes no one is using queue anymore */
660  pthread_cond_destroy(&queue->finish_cond);
661  pthread_cond_destroy(&queue->push_cond);
662  pthread_mutex_destroy(&queue->mutex);
663 
665 
666  MEM_freeN(queue);
667 }
668 
670 {
671  pthread_mutex_lock(&queue->mutex);
672 
673  BLI_gsqueue_push(queue->queue, &work);
674 
675  /* signal threads waiting to pop */
676  pthread_cond_signal(&queue->push_cond);
677  pthread_mutex_unlock(&queue->mutex);
678 }
679 
681 {
682  void *work = nullptr;
683 
684  /* wait until there is work */
685  pthread_mutex_lock(&queue->mutex);
686  while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
687  pthread_cond_wait(&queue->push_cond, &queue->mutex);
688  }
689 
690  /* if we have something, pop it */
692  BLI_gsqueue_pop(queue->queue, &work);
693 
695  pthread_cond_broadcast(&queue->finish_cond);
696  }
697  }
698 
699  pthread_mutex_unlock(&queue->mutex);
700 
701  return work;
702 }
703 
/* Fill `timeout` with the absolute time `ms` milliseconds from now, in the
 * form pthread_cond_timedwait expects. */
static void wait_timeout(struct timespec *timeout, int ms)
{
  long sec, usec;

#ifdef WIN32
  {
    struct _timeb now;
    _ftime(&now);
    sec = now.time;
    usec = now.millitm * 1000; /* microsecond precision would be better */
  }
#else
  {
    struct timeval now;
    gettimeofday(&now, nullptr);
    sec = now.tv_sec;
    usec = now.tv_usec;
  }
#endif

  /* Add current time + millisecond offset, carrying overflow from the
   * microsecond part into seconds. */
  const ldiv_t split = ldiv(ms, 1000);
  timeout->tv_sec = sec + split.quot;

  long usec_total = usec + (split.rem * 1000);
  if (usec_total >= 1000000) {
    timeout->tv_sec++;
    usec_total -= 1000000;
  }

  timeout->tv_nsec = usec_total * 1000;
}
738 
740 {
741  double t;
742  void *work = nullptr;
743  struct timespec timeout;
744 
746  wait_timeout(&timeout, ms);
747 
748  /* wait until there is work */
749  pthread_mutex_lock(&queue->mutex);
750  while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
751  if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT) {
752  break;
753  }
754  if (PIL_check_seconds_timer() - t >= ms * 0.001) {
755  break;
756  }
757  }
758 
759  /* if we have something, pop it */
761  BLI_gsqueue_pop(queue->queue, &work);
762 
764  pthread_cond_broadcast(&queue->finish_cond);
765  }
766  }
767 
768  pthread_mutex_unlock(&queue->mutex);
769 
770  return work;
771 }
772 
774 {
775  int size;
776 
777  pthread_mutex_lock(&queue->mutex);
779  pthread_mutex_unlock(&queue->mutex);
780 
781  return size;
782 }
783 
785 {
786  bool is_empty;
787 
788  pthread_mutex_lock(&queue->mutex);
789  is_empty = BLI_gsqueue_is_empty(queue->queue);
790  pthread_mutex_unlock(&queue->mutex);
791 
792  return is_empty;
793 }
794 
796 {
797  pthread_mutex_lock(&queue->mutex);
798 
799  queue->nowait = 1;
800 
801  /* signal threads waiting to pop */
802  pthread_cond_broadcast(&queue->push_cond);
803  pthread_mutex_unlock(&queue->mutex);
804 }
805 
807 {
808  /* wait for finish condition */
809  pthread_mutex_lock(&queue->mutex);
810 
811  while (!BLI_gsqueue_is_empty(queue->queue)) {
812  pthread_cond_wait(&queue->finish_cond, &queue->mutex);
813  }
814 
815  pthread_mutex_unlock(&queue->mutex);
816 }
817 
818 /* **** Special functions to help performance on crazy NUMA setups. **** */
819 
#if 0 /* UNUSED */
/* Detect CPUs whose topology resembles the 2nd-gen Threadripper (a NUMA
 * node 0 with direct memory access). Result is cached after the first call.
 * NOTE(extraction): the numaAPI_Run*Node(0) calls at the end of the two
 * helpers below (threads.cc:880, 898 per the definition index) were dropped
 * by the HTML extraction; restored. */
static bool check_is_threadripper2_alike_topology(void)
{
  /* NOTE: We hope operating system does not support CPU hot-swap to
   * a different brand. And that SMP of different types is also not
   * encouraged by the system. */
  static bool is_initialized = false;
  static bool is_threadripper2 = false;
  if (is_initialized) {
    return is_threadripper2;
  }
  is_initialized = true;
  char *cpu_brand = BLI_cpu_brand_string();
  if (cpu_brand == nullptr) {
    return false;
  }
  if (strstr(cpu_brand, "Threadripper")) {
    /* NOTE: We consider all Thread-rippers having similar topology to
     * the second one. This is because we are trying to utilize NUMA node
     * 0 as much as possible. This node does exist on earlier versions of
     * thread-ripper and setting affinity to it should not have negative
     * effect.
     * This allows us to avoid per-model check, making the code more
     * reliable for the CPUs which are not yet released.
     */
    if (strstr(cpu_brand, "2990WX") || strstr(cpu_brand, "2950X")) {
      is_threadripper2 = true;
    }
  }
  /* NOTE: While all dies of EPYC has memory controller, only two of them
   * has access to a lower-indexed DDR slots. Those dies are same as on
   * Threadripper2 with the memory controller.
   * Now, it is rather likely that reasonable amount of users don't max
   * up their DDR slots, making it only two dies connected to a DDR slot
   * with actual memory in it. */
  if (strstr(cpu_brand, "EPYC")) {
    /* NOTE: Similarly to Thread-ripper we do not do model check. */
    is_threadripper2 = true;
  }
  MEM_freeN(cpu_brand);
  return is_threadripper2;
}

static void threadripper_put_process_on_fast_node(void)
{
  if (!is_numa_available) {
    return;
  }
  /* NOTE: Technically, we can use NUMA nodes 0 and 2 and using both of
   * them in the affinity mask will allow OS to schedule threads more
   * flexible, possibly increasing overall performance when multiple apps
   * are crunching numbers.
   *
   * However, if scene fits into memory adjacent to a single die we don't
   * want OS to re-schedule the process to another die since that will make
   * it further away from memory allocated for .blend file. */
  /* NOTE: Even if NUMA is available in the API but is disabled in BIOS on
   * this workstation we still process here. If NUMA is disabled it will be a
   * single node, so our action is no-visible-changes, but allows to keep
   * things simple and unified. */
  numaAPI_RunProcessOnNode(0);
}

static void threadripper_put_thread_on_fast_node(void)
{
  if (!is_numa_available) {
    return;
  }
  /* NOTE: This is where things becomes more interesting. On the one hand
   * we can use nodes 0 and 2 and allow operating system to do balancing
   * of processes/threads for the maximum performance when multiple apps
   * are running.
   * On another hand, however, we probably want to use same node as the
   * main thread since that's where the memory of .blend file is likely
   * to be allocated.
   * Since the main thread is currently on node 0, we also put thread on
   * same node. */
  /* See additional note about NUMA disabled in BIOS above. */
  numaAPI_RunThreadOnNode(0);
}
#endif /* UNUSED */
901 
/* Pin the process to the "fast" NUMA node on Threadripper-like systems.
 * NOTE(extraction): signature restored per index (threads.cc:902). */
void BLI_thread_put_process_on_fast_node(void)
{
  /* Disabled for now since this causes only 16 threads to be used on a
   * thread-ripper for computations like sculpting and fluid sim. The problem
   * is that all threads created as children from this thread will inherit
   * the NUMA node and so will end up on the same node. This can be fixed
   * case-by-case by assigning the NUMA node for every child thread, however
   * this is difficult for external libraries and OpenMP, and out of our
   * control for plugins like external renderers. */
#if 0
  if (check_is_threadripper2_alike_topology()) {
    threadripper_put_process_on_fast_node();
  }
#endif
}
917 
/* Pin the calling thread to the "fast" NUMA node; see the note in
 * BLI_thread_put_process_on_fast_node for why this is disabled.
 * NOTE(extraction): signature restored per index (threads.cc:918). */
void BLI_thread_put_thread_on_fast_node(void)
{
  /* Disabled for now, see comment above. */
#if 0
  if (check_is_threadripper2_alike_topology()) {
    threadripper_put_thread_on_fast_node();
  }
#endif
}
#define BLI_assert(a)
Definition: BLI_assert.h:58
void BLI_gsqueue_free(GSQueue *queue)
Definition: gsqueue.c:107
void BLI_gsqueue_push(GSQueue *queue, const void *item)
Definition: gsqueue.c:122
GSQueue * BLI_gsqueue_new(const size_t elem_size)
Definition: gsqueue.c:83
void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
Definition: gsqueue.c:162
bool BLI_gsqueue_is_empty(const GSQueue *queue)
Definition: gsqueue.c:193
size_t BLI_gsqueue_len(const GSQueue *queue)
Definition: gsqueue.c:185
BLI_INLINE bool BLI_listbase_is_empty(const struct ListBase *lb)
Definition: BLI_listbase.h:124
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:172
BLI_INLINE void BLI_listbase_clear(struct ListBase *lb)
Definition: BLI_listbase.h:128
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:547
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:110
char * BLI_cpu_brand_string(void)
Definition: system.c:137
pthread_spinlock_t SpinLock
Definition: BLI_threads.h:111
pthread_rwlock_t ThreadRWMutex
Definition: BLI_threads.h:126
#define THREAD_LOCK_READ
Definition: BLI_threads.h:121
pthread_cond_t ThreadCondition
Definition: BLI_threads.h:151
@ LOCK_NODES
Definition: BLI_threads.h:71
@ LOCK_VIEW3D
Definition: BLI_threads.h:75
@ LOCK_DRAW_IMAGE
Definition: BLI_threads.h:68
@ LOCK_COLORMANAGE
Definition: BLI_threads.h:73
@ LOCK_MOVIECLIP
Definition: BLI_threads.h:72
@ LOCK_CUSTOM1
Definition: BLI_threads.h:70
@ LOCK_IMAGE
Definition: BLI_threads.h:67
@ LOCK_VIEWER
Definition: BLI_threads.h:69
@ LOCK_FFTW
Definition: BLI_threads.h:74
pthread_mutex_t ThreadMutex
Definition: BLI_threads.h:83
#define LIKELY(x)
bool is_initialized
ThreadMutex mutex
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble u2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLdouble GLdouble v2 _GL_VOID_RET _GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLdouble GLdouble nz _GL_VOID_RET _GL_VOID GLfloat GLfloat nz _GL_VOID_RET _GL_VOID GLint GLint nz _GL_VOID_RET _GL_VOID GLshort GLshort nz _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const GLfloat *values _GL_VOID_RET _GL_VOID GLsizei const GLushort *values _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID const GLuint const GLclampf *priorities _GL_VOID_RET _GL_VOID GLdouble y _GL_VOID_RET _GL_VOID GLfloat y _GL_VOID_RET _GL_VOID GLint y _GL_VOID_RET _GL_VOID GLshort y _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLfloat GLfloat z _GL_VOID_RET _GL_VOID GLint GLint z _GL_VOID_RET _GL_VOID GLshort GLshort z _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble w _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat w _GL_VOID_RET _GL_VOID GLint GLint GLint w _GL_VOID_RET _GL_VOID GLshort GLshort GLshort w _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble y2 _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat y2 _GL_VOID_RET _GL_VOID GLint GLint GLint y2 _GL_VOID_RET _GL_VOID GLshort GLshort GLshort y2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLuint *buffer _GL_VOID_RET _GL_VOID GLdouble t _GL_VOID_RET _GL_VOID GLfloat t _GL_VOID_RET _GL_VOID GLint t _GL_VOID_RET _GL_VOID GLshort t _GL_VOID_RET _GL_VOID GLdouble t
Read Guarded memory(de)allocation.
Group RGB to Bright Vector Camera CLAMP
Platform independent time functions.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE unsigned int atomic_fetch_and_add_u(unsigned int *p, unsigned int x)
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
static SpinLock spin
Definition: cachefile.c:152
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
static unsigned a[3]
Definition: RandGen.cpp:92
ThreadQueue * queue
all scheduled work for the cpu
bool numaAPI_RunProcessOnNode(int node)
Definition: numaapi_stub.c:65
NUMAAPI_Result numaAPI_Initialize(void)
Definition: numaapi_stub.c:34
@ NUMAAPI_SUCCESS
Definition: numaapi.h:37
bool numaAPI_RunThreadOnNode(int node)
Definition: numaapi_stub.c:70
volatile int nowait
Definition: threads.cc:639
volatile int canceled
Definition: threads.cc:640
GSQueue * queue
Definition: threads.cc:635
pthread_cond_t push_cond
Definition: threads.cc:637
pthread_cond_t finish_cond
Definition: threads.cc:638
pthread_mutex_t mutex
Definition: threads.cc:636
void * callerdata
Definition: threads.cc:138
struct ThreadSlot * next
Definition: threads.cc:136
pthread_t pthread
Definition: threads.cc:139
int avail
Definition: threads.cc:140
void *(* do_thread)(void *)
Definition: threads.cc:137
struct ThreadSlot * prev
Definition: threads.cc:136
unsigned int queue_tail
Definition: threads.cc:555
pthread_cond_t cond
Definition: threads.cc:553
unsigned int queue_head
Definition: threads.cc:555
pthread_mutex_t mutex
Definition: threads.cc:554
void BLI_condition_notify_all(ThreadCondition *cond)
Definition: threads.cc:622
static bool is_numa_available
Definition: threads.cc:128
static int num_threads_override
Definition: threads.cc:130
bool BLI_mutex_trylock(ThreadMutex *mutex)
Definition: threads.cc:411
void BLI_rw_mutex_end(ThreadRWMutex *mutex)
Definition: threads.cc:531
void BLI_thread_queue_push(ThreadQueue *queue, void *work)
Definition: threads.cc:669
void BLI_thread_unlock(int type)
Definition: threads.cc:389
void BLI_ticket_mutex_unlock(TicketMutex *ticket)
Definition: threads.cc:590
static pthread_mutex_t _image_lock
Definition: threads.cc:118
void BLI_mutex_end(ThreadMutex *mutex)
Definition: threads.cc:416
#define RE_MAX_THREAD
Definition: threads.cc:133
void BLI_mutex_free(ThreadMutex *mutex)
Definition: threads.cc:428
static pthread_mutex_t _custom1_lock
Definition: threads.cc:121
void BLI_threadpool_clear(ListBase *threadbase)
Definition: threads.cc:278
void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot)
Definition: threads.cc:159
void BLI_thread_lock(int type)
Definition: threads.cc:384
void BLI_threadapi_init(void)
Definition: threads.cc:143
void BLI_thread_put_process_on_fast_node(void)
Definition: threads.cc:902
void BLI_thread_put_thread_on_fast_node(void)
Definition: threads.cc:918
void BLI_threadpool_remove(ListBase *threadbase, void *callerdata)
Definition: threads.cc:252
static pthread_mutex_t _view3d_lock
Definition: threads.cc:126
void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
Definition: threads.cc:607
static unsigned int thread_levels
Definition: threads.cc:129
int BLI_threadpool_available_thread_index(ListBase *threadbase)
Definition: threads.cc:207
void * BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
Definition: threads.cc:739
void BLI_mutex_init(ThreadMutex *mutex)
Definition: threads.cc:396
static pthread_mutex_t _viewer_lock
Definition: threads.cc:120
void BLI_system_num_threads_override_set(int num)
Definition: threads.cc:345
void BLI_condition_end(ThreadCondition *cond)
Definition: threads.cc:627
int BLI_system_thread_count(void)
Definition: threads.cc:309
void BLI_thread_queue_free(ThreadQueue *queue)
Definition: threads.cc:657
int BLI_system_num_threads_override_get(void)
Definition: threads.cc:350
void BLI_threadapi_exit(void)
Definition: threads.cc:151
static pthread_mutex_t _colormanage_lock
Definition: threads.cc:124
static pthread_mutex_t _fftw_lock
Definition: threads.cc:125
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
Definition: threads.cc:516
void BLI_ticket_mutex_lock(TicketMutex *ticket)
Definition: threads.cc:576
static void * tslot_thread_start(void *tslot_p)
Definition: threads.cc:221
void BLI_condition_notify_one(ThreadCondition *cond)
Definition: threads.cc:617
bool BLI_thread_queue_is_empty(ThreadQueue *queue)
Definition: threads.cc:784
void BLI_ticket_mutex_free(TicketMutex *ticket)
Definition: threads.cc:569
void BLI_condition_wait_global_mutex(ThreadCondition *cond, const int type)
Definition: threads.cc:612
ThreadMutex * BLI_mutex_alloc(void)
Definition: threads.cc:421
int BLI_thread_is_main(void)
Definition: threads.cc:234
static pthread_mutex_t _image_draw_lock
Definition: threads.cc:119
void BLI_threadpool_end(ListBase *threadbase)
Definition: threads.cc:289
void BLI_condition_init(ThreadCondition *cond)
Definition: threads.cc:602
static pthread_t mainid
Definition: threads.cc:127
void BLI_mutex_lock(ThreadMutex *mutex)
Definition: threads.cc:401
void BLI_thread_queue_nowait(ThreadQueue *queue)
Definition: threads.cc:795
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
Definition: threads.cc:806
void BLI_mutex_unlock(ThreadMutex *mutex)
Definition: threads.cc:406
void BLI_rw_mutex_init(ThreadRWMutex *mutex)
Definition: threads.cc:511
static pthread_mutex_t _nodes_lock
Definition: threads.cc:122
static void wait_timeout(struct timespec *timeout, int ms)
Definition: threads.cc:704
ThreadRWMutex * BLI_rw_mutex_alloc(void)
Definition: threads.cc:536
void BLI_threadpool_insert(ListBase *threadbase, void *callerdata)
Definition: threads.cc:239
static pthread_mutex_t _movieclip_lock
Definition: threads.cc:123
void BLI_spin_init(SpinLock *spin)
Definition: threads.cc:447
void BLI_spin_unlock(SpinLock *spin)
Definition: threads.cc:480
void BLI_threadpool_remove_index(ListBase *threadbase, int index)
Definition: threads.cc:263
int BLI_available_threads(ListBase *threadbase)
Definition: threads.cc:193
void * BLI_thread_queue_pop(ThreadQueue *queue)
Definition: threads.cc:680
void BLI_spin_lock(SpinLock *spin)
Definition: threads.cc:461
void BLI_rw_mutex_free(ThreadRWMutex *mutex)
Definition: threads.cc:544
ThreadQueue * BLI_thread_queue_init(void)
Definition: threads.cc:643
void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
Definition: threads.cc:526
void BLI_spin_end(SpinLock *spin)
Definition: threads.cc:495
TicketMutex * BLI_ticket_mutex_alloc(void)
Definition: threads.cc:558
int BLI_thread_queue_len(ThreadQueue *queue)
Definition: threads.cc:773
static ThreadMutex * global_mutex_from_type(const int type)
Definition: threads.cc:357
double PIL_check_seconds_timer(void)
Definition: time.c:80
uint len