Blender  V2.93
COM_ExecutionGroup.cc
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2011, Blender Foundation.
17  */
18 
19 #include <algorithm>
20 #include <cmath>
21 #include <cstdlib>
22 #include <sstream>
23 
24 #include "atomic_ops.h"
25 
26 #include "COM_ChunkOrder.h"
27 #include "COM_Debug.h"
28 #include "COM_ExecutionGroup.h"
29 #include "COM_ExecutionSystem.h"
31 #include "COM_ViewerOperation.h"
32 #include "COM_WorkScheduler.h"
34 #include "COM_defines.h"
35 
36 #include "BLI_math.h"
37 #include "BLI_rand.hh"
38 #include "BLI_string.h"
39 
40 #include "BLT_translation.h"
41 
42 #include "MEM_guardedalloc.h"
43 
44 #include "PIL_time.h"
45 
46 #include "WM_api.h"
47 #include "WM_types.h"
48 
49 namespace blender::compositor {
50 
51 std::ostream &operator<<(std::ostream &os, const ExecutionGroupFlags &flags)
52 {
53  if (flags.initialized) {
54  os << "init,";
55  }
56  if (flags.is_output) {
57  os << "output,";
58  }
59  if (flags.complex) {
60  os << "complex,";
61  }
62  if (flags.open_cl) {
63  os << "open_cl,";
64  }
65  if (flags.single_threaded) {
66  os << "single_threaded,";
67  }
68  return os;
69 }
70 
72 {
73  m_id = id;
74  this->m_bTree = nullptr;
75  this->m_height = 0;
76  this->m_width = 0;
77  this->m_max_read_buffer_offset = 0;
78  this->m_x_chunks_len = 0;
79  this->m_y_chunks_len = 0;
80  this->m_chunks_len = 0;
81  this->m_chunks_finished = 0;
82  BLI_rcti_init(&this->m_viewerBorder, 0, 0, 0, 0);
83  this->m_executionStartTime = 0;
84 }
85 
86 std::ostream &operator<<(std::ostream &os, const ExecutionGroup &execution_group)
87 {
88  os << "ExecutionGroup(id=" << execution_group.get_id();
89  os << ",flags={" << execution_group.get_flags() << "}";
90  os << ",operation=" << *execution_group.getOutputOperation() << "";
91  os << ")";
92  return os;
93 }
94 
96 {
97  return this->getOutputOperation()->getRenderPriority();
98 }
99 
100 bool ExecutionGroup::can_contain(NodeOperation &operation)
101 {
102  if (!m_flags.initialized) {
103  return true;
104  }
105 
106  if (operation.get_flags().is_read_buffer_operation) {
107  return true;
108  }
109  if (operation.get_flags().is_write_buffer_operation) {
110  return false;
111  }
112  if (operation.get_flags().is_set_operation) {
113  return true;
114  }
115 
116  /* complex groups don't allow further ops (except read buffer and values, see above) */
117  if (m_flags.complex) {
118  return false;
119  }
120  /* complex ops can't be added to other groups (except their own, which they initialize, see
121  * above) */
122  if (operation.get_flags().complex) {
123  return false;
124  }
125 
126  return true;
127 }
128 
130 {
131  if (!can_contain(*operation)) {
132  return false;
133  }
134 
135  if (!operation->get_flags().is_read_buffer_operation &&
136  !operation->get_flags().is_write_buffer_operation) {
137  m_flags.complex = operation->get_flags().complex;
138  m_flags.open_cl = operation->get_flags().open_cl;
139  m_flags.single_threaded = operation->get_flags().single_threaded;
140  m_flags.initialized = true;
141  }
142 
143  m_operations.append(operation);
144 
145  return true;
146 }
147 
149 {
150  return this
151  ->m_operations[0]; /* the first operation of the group is always the output operation. */
152 }
153 
154 void ExecutionGroup::init_work_packages()
155 {
156  m_work_packages.clear();
157  if (this->m_chunks_len != 0) {
158  m_work_packages.resize(this->m_chunks_len);
159  for (unsigned int index = 0; index < m_chunks_len; index++) {
160  m_work_packages[index].state = eWorkPackageState::NotScheduled;
161  m_work_packages[index].execution_group = this;
162  m_work_packages[index].chunk_number = index;
163  determineChunkRect(&m_work_packages[index].rect, index);
164  }
165  }
166 }
167 
168 void ExecutionGroup::init_read_buffer_operations()
169 {
170  unsigned int max_offset = 0;
171  for (NodeOperation *operation : m_operations) {
172  if (operation->get_flags().is_read_buffer_operation) {
173  ReadBufferOperation *readOperation = static_cast<ReadBufferOperation *>(operation);
174  this->m_read_operations.append(readOperation);
175  max_offset = MAX2(max_offset, readOperation->getOffset());
176  }
177  }
178  max_offset++;
179  this->m_max_read_buffer_offset = max_offset;
180 }
181 
183 {
184  init_number_of_chunks();
185  init_work_packages();
186  init_read_buffer_operations();
187 }
188 
190 {
191  m_work_packages.clear();
192  this->m_chunks_len = 0;
193  this->m_x_chunks_len = 0;
194  this->m_y_chunks_len = 0;
195  this->m_read_operations.clear();
196  this->m_bTree = nullptr;
197 }
198 
199 void ExecutionGroup::determineResolution(unsigned int resolution[2])
200 {
201  NodeOperation *operation = this->getOutputOperation();
202  resolution[0] = operation->getWidth();
203  resolution[1] = operation->getHeight();
204  this->setResolution(resolution);
205  BLI_rcti_init(&this->m_viewerBorder, 0, this->m_width, 0, this->m_height);
206 }
207 
208 void ExecutionGroup::init_number_of_chunks()
209 {
210  if (this->m_flags.single_threaded) {
211  this->m_x_chunks_len = 1;
212  this->m_y_chunks_len = 1;
213  this->m_chunks_len = 1;
214  }
215  else {
216  const float chunkSizef = this->m_chunkSize;
217  const int border_width = BLI_rcti_size_x(&this->m_viewerBorder);
218  const int border_height = BLI_rcti_size_y(&this->m_viewerBorder);
219  this->m_x_chunks_len = ceil(border_width / chunkSizef);
220  this->m_y_chunks_len = ceil(border_height / chunkSizef);
221  this->m_chunks_len = this->m_x_chunks_len * this->m_y_chunks_len;
222  }
223 }
224 
225 blender::Array<unsigned int> ExecutionGroup::get_execution_order() const
226 {
227  blender::Array<unsigned int> chunk_order(m_chunks_len);
228  for (int chunk_index = 0; chunk_index < this->m_chunks_len; chunk_index++) {
229  chunk_order[chunk_index] = chunk_index;
230  }
231 
232  NodeOperation *operation = this->getOutputOperation();
233  float centerX = 0.5f;
234  float centerY = 0.5f;
236 
237  if (operation->get_flags().is_viewer_operation) {
238  ViewerOperation *viewer = (ViewerOperation *)operation;
239  centerX = viewer->getCenterX();
240  centerY = viewer->getCenterY();
241  order_type = viewer->getChunkOrder();
242  }
243 
244  const int border_width = BLI_rcti_size_x(&this->m_viewerBorder);
245  const int border_height = BLI_rcti_size_y(&this->m_viewerBorder);
246  int index;
247  switch (order_type) {
248  case ChunkOrdering::Random: {
250  blender::MutableSpan<unsigned int> span = chunk_order.as_mutable_span();
251  /* Shuffle twice to make it more random. */
252  rng.shuffle(span);
253  rng.shuffle(span);
254  break;
255  }
257  ChunkOrderHotspot hotspot(border_width * centerX, border_height * centerY, 0.0f);
258  blender::Array<ChunkOrder> chunk_orders(m_chunks_len);
259  for (index = 0; index < this->m_chunks_len; index++) {
260  const WorkPackage &work_package = m_work_packages[index];
261  chunk_orders[index].index = index;
262  chunk_orders[index].x = work_package.rect.xmin - this->m_viewerBorder.xmin;
263  chunk_orders[index].y = work_package.rect.ymin - this->m_viewerBorder.ymin;
264  chunk_orders[index].update_distance(&hotspot, 1);
265  }
266 
267  std::sort(&chunk_orders[0], &chunk_orders[this->m_chunks_len - 1]);
268  for (index = 0; index < this->m_chunks_len; index++) {
269  chunk_order[index] = chunk_orders[index].index;
270  }
271 
272  break;
273  }
275  unsigned int tx = border_width / 6;
276  unsigned int ty = border_height / 6;
277  unsigned int mx = border_width / 2;
278  unsigned int my = border_height / 2;
279  unsigned int bx = mx + 2 * tx;
280  unsigned int by = my + 2 * ty;
281  float addition = this->m_chunks_len / COM_RULE_OF_THIRDS_DIVIDER;
282 
283  ChunkOrderHotspot hotspots[9]{
284  ChunkOrderHotspot(mx, my, addition * 0),
285  ChunkOrderHotspot(tx, my, addition * 1),
286  ChunkOrderHotspot(bx, my, addition * 2),
287  ChunkOrderHotspot(bx, by, addition * 3),
288  ChunkOrderHotspot(tx, ty, addition * 4),
289  ChunkOrderHotspot(bx, ty, addition * 5),
290  ChunkOrderHotspot(tx, by, addition * 6),
291  ChunkOrderHotspot(mx, ty, addition * 7),
292  ChunkOrderHotspot(mx, by, addition * 8),
293  };
294 
295  blender::Array<ChunkOrder> chunk_orders(m_chunks_len);
296  for (index = 0; index < this->m_chunks_len; index++) {
297  const WorkPackage &work_package = m_work_packages[index];
298  chunk_orders[index].index = index;
299  chunk_orders[index].x = work_package.rect.xmin - this->m_viewerBorder.xmin;
300  chunk_orders[index].y = work_package.rect.ymin - this->m_viewerBorder.ymin;
301  chunk_orders[index].update_distance(hotspots, 9);
302  }
303 
304  std::sort(&chunk_orders[0], &chunk_orders[this->m_chunks_len]);
305 
306  for (index = 0; index < this->m_chunks_len; index++) {
307  chunk_order[index] = chunk_orders[index].index;
308  }
309 
310  break;
311  }
313  default:
314  break;
315  }
316  return chunk_order;
317 }
318 
324 {
325  const CompositorContext &context = graph->getContext();
326  const bNodeTree *bTree = context.getbNodeTree();
327  if (this->m_width == 0 || this->m_height == 0) {
328  return;
329  }
330  if (bTree->test_break && bTree->test_break(bTree->tbh)) {
331  return;
332  }
333  if (this->m_chunks_len == 0) {
334  return;
335  }
336  unsigned int chunk_index;
337 
338  this->m_executionStartTime = PIL_check_seconds_timer();
339 
340  this->m_chunks_finished = 0;
341  this->m_bTree = bTree;
342 
343  blender::Array<unsigned int> chunk_order = get_execution_order();
344 
347 
348  bool breaked = false;
349  bool finished = false;
350  unsigned int startIndex = 0;
351  const int maxNumberEvaluated = BLI_system_thread_count() * 2;
352 
353  while (!finished && !breaked) {
354  bool startEvaluated = false;
355  finished = true;
356  int numberEvaluated = 0;
357 
358  for (int index = startIndex;
359  index < this->m_chunks_len && numberEvaluated < maxNumberEvaluated;
360  index++) {
361  chunk_index = chunk_order[index];
362  int yChunk = chunk_index / this->m_x_chunks_len;
363  int xChunk = chunk_index - (yChunk * this->m_x_chunks_len);
364  const WorkPackage &work_package = m_work_packages[chunk_index];
365  switch (work_package.state) {
367  scheduleChunkWhenPossible(graph, xChunk, yChunk);
368  finished = false;
369  startEvaluated = true;
370  numberEvaluated++;
371 
372  if (bTree->update_draw) {
373  bTree->update_draw(bTree->udh);
374  }
375  break;
376  }
378  finished = false;
379  startEvaluated = true;
380  numberEvaluated++;
381  break;
382  }
384  if (!startEvaluated) {
385  startIndex = index + 1;
386  }
387  }
388  };
389  }
390 
392 
393  if (bTree->test_break && bTree->test_break(bTree->tbh)) {
394  breaked = true;
395  }
396  }
399 }
400 
402 {
403  WorkPackage &work_package = m_work_packages[chunkNumber];
404 
405  MemoryBuffer **memoryBuffers = (MemoryBuffer **)MEM_callocN(
406  sizeof(MemoryBuffer *) * this->m_max_read_buffer_offset, __func__);
407  rcti output;
408  for (ReadBufferOperation *readOperation : m_read_operations) {
409  MemoryProxy *memoryProxy = readOperation->getMemoryProxy();
410  this->determineDependingAreaOfInterest(&work_package.rect, readOperation, &output);
411  MemoryBuffer *memoryBuffer = memoryProxy->getExecutor()->constructConsolidatedMemoryBuffer(
412  *memoryProxy, output);
413  memoryBuffers[readOperation->getOffset()] = memoryBuffer;
414  }
415  return memoryBuffers;
416 }
417 
419  rcti &rect)
420 {
421  MemoryBuffer *imageBuffer = memoryProxy.getBuffer();
422  MemoryBuffer *result = new MemoryBuffer(&memoryProxy, rect, MemoryBufferState::Temporary);
423  result->fill_from(*imageBuffer);
424  return result;
425 }
426 
/* Called after a chunk finished executing: advance the chunk's state, free the
 * temporary input buffers that were created for it, bump the finished counter
 * and report progress to the node tree (top-level groups only). */
void ExecutionGroup::finalizeChunkExecution(int chunkNumber, MemoryBuffer **memoryBuffers)
{
  WorkPackage &work_package = m_work_packages[chunkNumber];
  /* Only transition Scheduled -> Executed; other states are left untouched. */
  if (work_package.state == eWorkPackageState::Scheduled) {
    work_package.state = eWorkPackageState::Executed;
  }

  /* Atomic increment: chunks finish concurrently on worker threads. */
  atomic_add_and_fetch_u(&this->m_chunks_finished, 1);
  if (memoryBuffers) {
    for (unsigned int index = 0; index < this->m_max_read_buffer_offset; index++) {
      MemoryBuffer *buffer = memoryBuffers[index];
      if (buffer) {
        /* Only temporary (consolidated-per-chunk) buffers are owned here;
         * non-temporary buffers belong to their memory proxy. */
        if (buffer->isTemporarily()) {
          memoryBuffers[index] = nullptr;
          delete buffer;
        }
      }
    }
    MEM_freeN(memoryBuffers);
  }
  if (this->m_bTree) {
    // status report is only performed for top level Execution Groups.
    float progress = this->m_chunks_finished;
    progress /= this->m_chunks_len;
    this->m_bTree->progress(this->m_bTree->prh, progress);

    char buf[128];
    BLI_snprintf(buf,
                 sizeof(buf),
                 TIP_("Compositing | Tile %u-%u"),
                 this->m_chunks_finished,
                 this->m_chunks_len);
    this->m_bTree->stats_draw(this->m_bTree->sdh, buf);
  }
}
462 
463 inline void ExecutionGroup::determineChunkRect(rcti *r_rect,
464  const unsigned int xChunk,
465  const unsigned int yChunk) const
466 {
467  const int border_width = BLI_rcti_size_x(&this->m_viewerBorder);
468  const int border_height = BLI_rcti_size_y(&this->m_viewerBorder);
469 
470  if (this->m_flags.single_threaded) {
472  r_rect, this->m_viewerBorder.xmin, border_width, this->m_viewerBorder.ymin, border_height);
473  }
474  else {
475  const unsigned int minx = xChunk * this->m_chunkSize + this->m_viewerBorder.xmin;
476  const unsigned int miny = yChunk * this->m_chunkSize + this->m_viewerBorder.ymin;
477  const unsigned int width = MIN2((unsigned int)this->m_viewerBorder.xmax, this->m_width);
478  const unsigned int height = MIN2((unsigned int)this->m_viewerBorder.ymax, this->m_height);
479  BLI_rcti_init(r_rect,
480  MIN2(minx, this->m_width),
481  MIN2(minx + this->m_chunkSize, width),
482  MIN2(miny, this->m_height),
483  MIN2(miny + this->m_chunkSize, height));
484  }
485 }
486 
487 void ExecutionGroup::determineChunkRect(rcti *r_rect, const unsigned int chunkNumber) const
488 {
489  const unsigned int yChunk = chunkNumber / this->m_x_chunks_len;
490  const unsigned int xChunk = chunkNumber - (yChunk * this->m_x_chunks_len);
491  determineChunkRect(r_rect, xChunk, yChunk);
492 }
493 
495 {
496  // we assume that this method is only called from complex execution groups.
497  NodeOperation *operation = this->getOutputOperation();
498  if (operation->get_flags().is_write_buffer_operation) {
499  WriteBufferOperation *writeOperation = (WriteBufferOperation *)operation;
501  writeOperation->getMemoryProxy(), rect, MemoryBufferState::Temporary);
502  return buffer;
503  }
504  return nullptr;
505 }
506 
507 bool ExecutionGroup::scheduleAreaWhenPossible(ExecutionSystem *graph, rcti *area)
508 {
509  if (this->m_flags.single_threaded) {
510  return scheduleChunkWhenPossible(graph, 0, 0);
511  }
512  // find all chunks inside the rect
513  // determine minxchunk, minychunk, maxxchunk, maxychunk where x and y are chunknumbers
514 
515  int indexx, indexy;
516  int minx = max_ii(area->xmin - m_viewerBorder.xmin, 0);
517  int maxx = min_ii(area->xmax - m_viewerBorder.xmin, m_viewerBorder.xmax - m_viewerBorder.xmin);
518  int miny = max_ii(area->ymin - m_viewerBorder.ymin, 0);
519  int maxy = min_ii(area->ymax - m_viewerBorder.ymin, m_viewerBorder.ymax - m_viewerBorder.ymin);
520  int minxchunk = minx / (int)m_chunkSize;
521  int maxxchunk = (maxx + (int)m_chunkSize - 1) / (int)m_chunkSize;
522  int minychunk = miny / (int)m_chunkSize;
523  int maxychunk = (maxy + (int)m_chunkSize - 1) / (int)m_chunkSize;
524  minxchunk = max_ii(minxchunk, 0);
525  minychunk = max_ii(minychunk, 0);
526  maxxchunk = min_ii(maxxchunk, (int)m_x_chunks_len);
527  maxychunk = min_ii(maxychunk, (int)m_y_chunks_len);
528 
529  bool result = true;
530  for (indexx = minxchunk; indexx < maxxchunk; indexx++) {
531  for (indexy = minychunk; indexy < maxychunk; indexy++) {
532  if (!scheduleChunkWhenPossible(graph, indexx, indexy)) {
533  result = false;
534  }
535  }
536  }
537 
538  return result;
539 }
540 
541 bool ExecutionGroup::scheduleChunk(unsigned int chunkNumber)
542 {
543  WorkPackage &work_package = m_work_packages[chunkNumber];
544  if (work_package.state == eWorkPackageState::NotScheduled) {
545  work_package.state = eWorkPackageState::Scheduled;
546  WorkScheduler::schedule(&work_package);
547  return true;
548  }
549  return false;
550 }
551 
552 bool ExecutionGroup::scheduleChunkWhenPossible(ExecutionSystem *graph,
553  const int chunk_x,
554  const int chunk_y)
555 {
556  if (chunk_x < 0 || chunk_x >= (int)this->m_x_chunks_len) {
557  return true;
558  }
559  if (chunk_y < 0 || chunk_y >= (int)this->m_y_chunks_len) {
560  return true;
561  }
562 
563  // Check if chunk is already executed or scheduled and not yet executed.
564  const int chunk_index = chunk_y * this->m_x_chunks_len + chunk_x;
565  WorkPackage &work_package = m_work_packages[chunk_index];
566  if (work_package.state == eWorkPackageState::Executed) {
567  return true;
568  }
569  if (work_package.state == eWorkPackageState::Scheduled) {
570  return false;
571  }
572 
573  bool can_be_executed = true;
574  rcti area;
575 
576  for (ReadBufferOperation *read_operation : m_read_operations) {
577  BLI_rcti_init(&area, 0, 0, 0, 0);
578  MemoryProxy *memory_proxy = read_operation->getMemoryProxy();
579  determineDependingAreaOfInterest(&work_package.rect, read_operation, &area);
580  ExecutionGroup *group = memory_proxy->getExecutor();
581 
582  if (!group->scheduleAreaWhenPossible(graph, &area)) {
583  can_be_executed = false;
584  }
585  }
586 
587  if (can_be_executed) {
588  scheduleChunk(chunk_index);
589  }
590 
591  return false;
592 }
593 
594 void ExecutionGroup::determineDependingAreaOfInterest(rcti *input,
595  ReadBufferOperation *readOperation,
596  rcti *output)
597 {
598  this->getOutputOperation()->determineDependingAreaOfInterest(input, readOperation, output);
599 }
600 
601 void ExecutionGroup::setViewerBorder(float xmin, float xmax, float ymin, float ymax)
602 {
603  const NodeOperation &operation = *this->getOutputOperation();
604  if (operation.get_flags().use_viewer_border) {
605  BLI_rcti_init(&this->m_viewerBorder,
606  xmin * this->m_width,
607  xmax * this->m_width,
608  ymin * this->m_height,
609  ymax * this->m_height);
610  }
611 }
612 
613 void ExecutionGroup::setRenderBorder(float xmin, float xmax, float ymin, float ymax)
614 {
615  const NodeOperation &operation = *this->getOutputOperation();
616  if (operation.isOutputOperation(true) && operation.get_flags().use_render_border) {
617  BLI_rcti_init(&this->m_viewerBorder,
618  xmin * this->m_width,
619  xmax * this->m_width,
620  ymin * this->m_height,
621  ymax * this->m_height);
622  }
623 }
624 
625 } // namespace blender::compositor
MINLINE int min_ii(int a, int b)
MINLINE int max_ii(int a, int b)
BLI_INLINE int BLI_rcti_size_y(const struct rcti *rct)
Definition: BLI_rect.h:157
void BLI_rcti_init(struct rcti *rect, int xmin, int xmax, int ymin, int ymax)
Definition: rct.c:446
BLI_INLINE int BLI_rcti_size_x(const struct rcti *rct)
Definition: BLI_rect.h:153
size_t BLI_snprintf(char *__restrict dst, size_t maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
int BLI_system_thread_count(void)
Definition: threads.cc:309
#define MAX2(a, b)
#define MIN2(a, b)
#define TIP_(msgid)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei height
Read Guarded memory(de)allocation.
Platform independent time functions.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x)
void sort(btMatrix3x3 &U, btVector3 &sigma, btMatrix3x3 &V, int t)
Helper function of 3X3 SVD for sorting singular values.
#define output
void shuffle(MutableSpan< T > values)
Definition: BLI_rand.hh:89
Overall context of the compositor.
static void execution_group_started(const ExecutionGroup *group)
Definition: COM_Debug.cc:490
static void graphviz(const ExecutionSystem *system)
Definition: COM_Debug.cc:496
static void execution_group_finished(const ExecutionGroup *group)
Definition: COM_Debug.cc:493
Class ExecutionGroup is a group of Operations that are executed as one. This grouping is used to comb...
eCompositorPriority getRenderPriority()
get the Render priority of this ExecutionGroup
void execute(ExecutionSystem *graph)
schedule an ExecutionGroup
void setViewerBorder(float xmin, float xmax, float ymin, float ymax)
set border for viewer operation
MemoryBuffer * constructConsolidatedMemoryBuffer(MemoryProxy &memoryProxy, rcti &rect)
compose multiple chunks into a single chunk
MemoryBuffer * allocateOutputBuffer(rcti &rect)
allocate the outputbuffer of a chunk
const ExecutionGroupFlags get_flags() const
MemoryBuffer ** getInputBuffersOpenCL(int chunkNumber)
get all inputbuffers needed to calculate an chunk
void deinitExecution()
deinitExecution is called just after execution the whole graph.
NodeOperation * getOutputOperation() const
get the output operation of this ExecutionGroup
bool addOperation(NodeOperation *operation)
add an operation to this ExecutionGroup
void initExecution()
initExecution is called just before the execution of the whole graph will be done.
void setResolution(unsigned int resolution[2])
set the resolution of this executiongroup
void setRenderBorder(float xmin, float xmax, float ymin, float ymax)
void finalizeChunkExecution(int chunkNumber, MemoryBuffer **memoryBuffers)
after a chunk is executed the needed resources can be freed or unlocked.
void determineResolution(unsigned int resolution[2])
determine the resolution of this ExecutionGroup
the ExecutionSystem contains the whole compositor tree.
a MemoryBuffer contains access to the data of a chunk
A MemoryProxy is a unique identifier for a memory buffer. A single MemoryProxy is used among all chun...
ExecutionGroup * getExecutor() const
get the ExecutionGroup that can be scheduled to calculate a certain chunk.
MemoryBuffer * getBuffer()
get the allocated memory
NodeOperation contains calculation logic.
virtual eCompositorPriority getRenderPriority() const
get the render priority of this node.
const NodeOperationFlags get_flags() const
virtual bool isOutputOperation(bool) const
isOutputOperation determines whether this operation is an output of the ExecutionSystem during render...
virtual bool determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output)
Depsgraph * graph
eCompositorPriority
Possible priority settings.
Definition: COM_Enums.h:45
ChunkOrdering
The order of chunks to be scheduled.
Definition: COM_defines.h:65
@ NotScheduled
chunk is not yet scheduled
@ Scheduled
chunk is scheduled, but not yet executed
@ RuleOfThirds
experimental ordering with 9 hot-spots.
@ CenterOut
order from a distance to centerX/centerY
@ Temporary
chunk is consolidated from other chunks. special state.
__kernel void ccl_constant KernelData ccl_global void ccl_global char ccl_global int ccl_global char ccl_global unsigned int ccl_global float * buffer
void(* MEM_freeN)(void *vmemh)
Definition: mallocn.c:41
void *(* MEM_callocN)(size_t len, const char *str)
Definition: mallocn.c:45
static void area(int d1, int d2, int e1, int e2, float weights[2])
constexpr float COM_RULE_OF_THIRDS_DIVIDER
Definition: COM_defines.h:79
std::ostream & operator<<(std::ostream &os, const eCompositorPriority &priority)
Definition: COM_Enums.cc:23
void(* progress)(void *, float progress)
int(* test_break)(void *)
void(* update_draw)(void *)
void(* stats_draw)(void *, const char *str)
contains data about work that can be scheduled
static void schedule(WorkPackage *package)
schedule a chunk of a group to be calculated. An execution group schedules a chunk in the WorkSchedul...
static void finish()
wait for all work to be completed.
int ymin
Definition: DNA_vec_types.h:80
int ymax
Definition: DNA_vec_types.h:80
int xmin
Definition: DNA_vec_types.h:79
int xmax
Definition: DNA_vec_types.h:79
double PIL_check_seconds_timer(void)
Definition: time.c:80
ccl_device_inline float3 ceil(const float3 &a)