Blender  V2.93
util_ssei.h
Go to the documentation of this file.
1 /*
2  * Copyright 2011-2013 Intel Corporation
3  * Modifications Copyright 2014, Blender Foundation.
4  *
5  * Licensed under the Apache License, Version 2.0(the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #ifndef __UTIL_SSEI_H__
19 #define __UTIL_SSEI_H__
20 
22 
23 #ifdef __KERNEL_SSE2__
24 
25 struct sseb;
26 struct ssef;
27 
29 struct ssei {
30  typedef sseb Mask; // mask type
31  typedef ssei Int; // int type
32  typedef ssef Float; // float type
33 
34  enum { size = 4 }; // number of SIMD elements
35  union {
36  __m128i m128;
37  int32_t i[4];
38  }; // data
39 
43 
44  __forceinline ssei()
45  {
46  }
47  __forceinline ssei(const ssei &a)
48  {
49  m128 = a.m128;
50  }
51  __forceinline ssei &operator=(const ssei &a)
52  {
53  m128 = a.m128;
54  return *this;
55  }
56 
57  __forceinline ssei(const __m128i a) : m128(a)
58  {
59  }
60  __forceinline operator const __m128i &(void)const
61  {
62  return m128;
63  }
64  __forceinline operator __m128i &(void)
65  {
66  return m128;
67  }
68 
69  __forceinline ssei(const int a) : m128(_mm_set1_epi32(a))
70  {
71  }
72  __forceinline ssei(int a, int b, int c, int d) : m128(_mm_setr_epi32(a, b, c, d))
73  {
74  }
75 
76  __forceinline explicit ssei(const __m128 a) : m128(_mm_cvtps_epi32(a))
77  {
78  }
79 
83 
84  __forceinline const int32_t &operator[](const size_t index) const
85  {
86  assert(index < 4);
87  return i[index];
88  }
89  __forceinline int32_t &operator[](const size_t index)
90  {
91  assert(index < 4);
92  return i[index];
93  }
94 };
95 
99 
100 __forceinline const ssei cast(const __m128 &a)
101 {
102  return _mm_castps_si128(a);
103 }
104 __forceinline const ssei operator+(const ssei &a)
105 {
106  return a;
107 }
108 __forceinline const ssei operator-(const ssei &a)
109 {
110  return _mm_sub_epi32(_mm_setzero_si128(), a.m128);
111 }
112 # if defined(__KERNEL_SSSE3__)
113 __forceinline const ssei abs(const ssei &a)
114 {
115  return _mm_abs_epi32(a.m128);
116 }
117 # endif
118 
122 
123 __forceinline const ssei operator+(const ssei &a, const ssei &b)
124 {
125  return _mm_add_epi32(a.m128, b.m128);
126 }
127 __forceinline const ssei operator+(const ssei &a, const int32_t &b)
128 {
129  return a + ssei(b);
130 }
131 __forceinline const ssei operator+(const int32_t &a, const ssei &b)
132 {
133  return ssei(a) + b;
134 }
135 
136 __forceinline const ssei operator-(const ssei &a, const ssei &b)
137 {
138  return _mm_sub_epi32(a.m128, b.m128);
139 }
140 __forceinline const ssei operator-(const ssei &a, const int32_t &b)
141 {
142  return a - ssei(b);
143 }
144 __forceinline const ssei operator-(const int32_t &a, const ssei &b)
145 {
146  return ssei(a) - b;
147 }
148 
149 # if defined(__KERNEL_SSE41__)
150 __forceinline const ssei operator*(const ssei &a, const ssei &b)
151 {
152  return _mm_mullo_epi32(a.m128, b.m128);
153 }
154 __forceinline const ssei operator*(const ssei &a, const int32_t &b)
155 {
156  return a * ssei(b);
157 }
158 __forceinline const ssei operator*(const int32_t &a, const ssei &b)
159 {
160  return ssei(a) * b;
161 }
162 # endif
163 
164 __forceinline const ssei operator&(const ssei &a, const ssei &b)
165 {
166  return _mm_and_si128(a.m128, b.m128);
167 }
168 __forceinline const ssei operator&(const ssei &a, const int32_t &b)
169 {
170  return a & ssei(b);
171 }
172 __forceinline const ssei operator&(const int32_t &a, const ssei &b)
173 {
174  return ssei(a) & b;
175 }
176 
177 __forceinline const ssei operator|(const ssei &a, const ssei &b)
178 {
179  return _mm_or_si128(a.m128, b.m128);
180 }
181 __forceinline const ssei operator|(const ssei &a, const int32_t &b)
182 {
183  return a | ssei(b);
184 }
185 __forceinline const ssei operator|(const int32_t &a, const ssei &b)
186 {
187  return ssei(a) | b;
188 }
189 
190 __forceinline const ssei operator^(const ssei &a, const ssei &b)
191 {
192  return _mm_xor_si128(a.m128, b.m128);
193 }
194 __forceinline const ssei operator^(const ssei &a, const int32_t &b)
195 {
196  return a ^ ssei(b);
197 }
198 __forceinline const ssei operator^(const int32_t &a, const ssei &b)
199 {
200  return ssei(a) ^ b;
201 }
202 
203 __forceinline const ssei operator<<(const ssei &a, const int32_t &n)
204 {
205  return _mm_slli_epi32(a.m128, n);
206 }
207 __forceinline const ssei operator>>(const ssei &a, const int32_t &n)
208 {
209  return _mm_srai_epi32(a.m128, n);
210 }
211 
212 __forceinline const ssei andnot(const ssei &a, const ssei &b)
213 {
214  return _mm_andnot_si128(a.m128, b.m128);
215 }
216 __forceinline const ssei andnot(const sseb &a, const ssei &b)
217 {
218  return _mm_andnot_si128(cast(a.m128), b.m128);
219 }
220 __forceinline const ssei andnot(const ssei &a, const sseb &b)
221 {
222  return _mm_andnot_si128(a.m128, cast(b.m128));
223 }
224 
225 __forceinline const ssei sra(const ssei &a, const int32_t &b)
226 {
227  return _mm_srai_epi32(a.m128, b);
228 }
229 __forceinline const ssei srl(const ssei &a, const int32_t &b)
230 {
231  return _mm_srli_epi32(a.m128, b);
232 }
233 
# if defined(__KERNEL_SSE41__)
/* Per-lane signed min/max; _mm_min_epi32/_mm_max_epi32 require SSE4.1. */
__forceinline const ssei min(const ssei &a, const ssei &b)
{
  return _mm_min_epi32(a.m128, b.m128);
}
__forceinline const ssei min(const ssei &a, const int32_t &b)
{
  return min(a, ssei(b));
}
__forceinline const ssei min(const int32_t &a, const ssei &b)
{
  return min(ssei(a), b);
}

__forceinline const ssei max(const ssei &a, const ssei &b)
{
  return _mm_max_epi32(a.m128, b.m128);
}
__forceinline const ssei max(const ssei &a, const int32_t &b)
{
  return max(a, ssei(b));
}
__forceinline const ssei max(const int32_t &a, const ssei &b)
{
  return max(ssei(a), b);
}
# endif
261 
265 
266 __forceinline ssei &operator+=(ssei &a, const ssei &b)
267 {
268  return a = a + b;
269 }
270 __forceinline ssei &operator+=(ssei &a, const int32_t &b)
271 {
272  return a = a + b;
273 }
274 
275 __forceinline ssei &operator-=(ssei &a, const ssei &b)
276 {
277  return a = a - b;
278 }
279 __forceinline ssei &operator-=(ssei &a, const int32_t &b)
280 {
281  return a = a - b;
282 }
283 
284 # if defined(__KERNEL_SSE41__)
285 __forceinline ssei &operator*=(ssei &a, const ssei &b)
286 {
287  return a = a * b;
288 }
289 __forceinline ssei &operator*=(ssei &a, const int32_t &b)
290 {
291  return a = a * b;
292 }
293 # endif
294 
295 __forceinline ssei &operator&=(ssei &a, const ssei &b)
296 {
297  return a = a & b;
298 }
299 __forceinline ssei &operator&=(ssei &a, const int32_t &b)
300 {
301  return a = a & b;
302 }
303 
304 __forceinline ssei &operator|=(ssei &a, const ssei &b)
305 {
306  return a = a | b;
307 }
308 __forceinline ssei &operator|=(ssei &a, const int32_t &b)
309 {
310  return a = a | b;
311 }
312 
313 __forceinline ssei &operator^=(ssei &a, const ssei &b)
314 {
315  return a = a ^ b;
316 }
317 __forceinline ssei &operator^=(ssei &a, const int32_t &b)
318 {
319  return a = a ^ b;
320 }
321 
322 __forceinline ssei &operator<<=(ssei &a, const int32_t &b)
323 {
324  return a = a << b;
325 }
326 __forceinline ssei &operator>>=(ssei &a, const int32_t &b)
327 {
328  return a = a >> b;
329 }
330 
334 
335 __forceinline const sseb operator==(const ssei &a, const ssei &b)
336 {
337  return _mm_castsi128_ps(_mm_cmpeq_epi32(a.m128, b.m128));
338 }
339 __forceinline const sseb operator==(const ssei &a, const int32_t &b)
340 {
341  return a == ssei(b);
342 }
343 __forceinline const sseb operator==(const int32_t &a, const ssei &b)
344 {
345  return ssei(a) == b;
346 }
347 
348 __forceinline const sseb operator!=(const ssei &a, const ssei &b)
349 {
350  return !(a == b);
351 }
352 __forceinline const sseb operator!=(const ssei &a, const int32_t &b)
353 {
354  return a != ssei(b);
355 }
356 __forceinline const sseb operator!=(const int32_t &a, const ssei &b)
357 {
358  return ssei(a) != b;
359 }
360 
361 __forceinline const sseb operator<(const ssei &a, const ssei &b)
362 {
363  return _mm_castsi128_ps(_mm_cmplt_epi32(a.m128, b.m128));
364 }
365 __forceinline const sseb operator<(const ssei &a, const int32_t &b)
366 {
367  return a < ssei(b);
368 }
369 __forceinline const sseb operator<(const int32_t &a, const ssei &b)
370 {
371  return ssei(a) < b;
372 }
373 
374 __forceinline const sseb operator>=(const ssei &a, const ssei &b)
375 {
376  return !(a < b);
377 }
378 __forceinline const sseb operator>=(const ssei &a, const int32_t &b)
379 {
380  return a >= ssei(b);
381 }
382 __forceinline const sseb operator>=(const int32_t &a, const ssei &b)
383 {
384  return ssei(a) >= b;
385 }
386 
387 __forceinline const sseb operator>(const ssei &a, const ssei &b)
388 {
389  return _mm_castsi128_ps(_mm_cmpgt_epi32(a.m128, b.m128));
390 }
391 __forceinline const sseb operator>(const ssei &a, const int32_t &b)
392 {
393  return a > ssei(b);
394 }
395 __forceinline const sseb operator>(const int32_t &a, const ssei &b)
396 {
397  return ssei(a) > b;
398 }
399 
400 __forceinline const sseb operator<=(const ssei &a, const ssei &b)
401 {
402  return !(a > b);
403 }
404 __forceinline const sseb operator<=(const ssei &a, const int32_t &b)
405 {
406  return a <= ssei(b);
407 }
408 __forceinline const sseb operator<=(const int32_t &a, const ssei &b)
409 {
410  return ssei(a) <= b;
411 }
412 
413 __forceinline const ssei select(const sseb &m, const ssei &t, const ssei &f)
414 {
415 # ifdef __KERNEL_SSE41__
416  return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
417 # else
418  return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
419 # endif
420 }
421 
422 __forceinline const ssei select(const int mask, const ssei &t, const ssei &f)
423 {
424 # if defined(__KERNEL_SSE41__) && \
425  ((!defined(__clang__) && !defined(_MSC_VER)) || defined(__INTEL_COMPILER))
426  return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
427 # else
428  return select(sseb(mask), t, f);
429 # endif
430 }
431 
433 // Movement/Shifting/Shuffling Functions
435 
436 __forceinline ssei unpacklo(const ssei &a, const ssei &b)
437 {
438  return _mm_unpacklo_epi32(a, b);
439 }
440 __forceinline ssei unpackhi(const ssei &a, const ssei &b)
441 {
442  return _mm_unpackhi_epi32(a, b);
443 }
444 
445 template<size_t i0, size_t i1, size_t i2, size_t i3>
446 __forceinline const ssei shuffle(const ssei &a)
447 {
448 # ifdef __KERNEL_NEON__
449  return shuffle_neon<ssei, i0, i1, i2, i3>(a);
450 # else
451  return _mm_shuffle_epi32(a, _MM_SHUFFLE(i3, i2, i1, i0));
452 # endif
453 }
454 
455 template<size_t i0, size_t i1, size_t i2, size_t i3>
456 __forceinline const ssei shuffle(const ssei &a, const ssei &b)
457 {
458 # ifdef __KERNEL_NEON__
459  return shuffle_neon<ssei, i0, i1, i2, i3>(a, b);
460 # else
461  return _mm_castps_si128(
462  _mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
463 # endif
464 }
465 
466 template<size_t i0> __forceinline const ssei shuffle(const ssei &b)
467 {
468  return shuffle<i0, i0, i0, i0>(b);
469 }
470 
471 # if defined(__KERNEL_SSE41__)
472 template<size_t src> __forceinline int extract(const ssei &b)
473 {
474  return _mm_extract_epi32(b, src);
475 }
476 template<size_t dst> __forceinline const ssei insert(const ssei &a, const int32_t b)
477 {
478  return _mm_insert_epi32(a, b, dst);
479 }
480 # else
481 template<size_t src> __forceinline int extract(const ssei &b)
482 {
483  return b[src];
484 }
485 template<size_t dst> __forceinline const ssei insert(const ssei &a, const int32_t b)
486 {
487  ssei c = a;
488  c[dst] = b;
489  return c;
490 }
491 # endif
492 
496 
497 # if defined(__KERNEL_SSE41__)
498 __forceinline const ssei vreduce_min(const ssei &v)
499 {
500  ssei h = min(shuffle<1, 0, 3, 2>(v), v);
501  return min(shuffle<2, 3, 0, 1>(h), h);
502 }
503 __forceinline const ssei vreduce_max(const ssei &v)
504 {
505  ssei h = max(shuffle<1, 0, 3, 2>(v), v);
506  return max(shuffle<2, 3, 0, 1>(h), h);
507 }
508 __forceinline const ssei vreduce_add(const ssei &v)
509 {
510  ssei h = shuffle<1, 0, 3, 2>(v) + v;
511  return shuffle<2, 3, 0, 1>(h) + h;
512 }
513 
514 __forceinline int reduce_min(const ssei &v)
515 {
516 # ifdef __KERNEL_NEON__
517  return vminvq_s32(v);
518 # else
519  return extract<0>(vreduce_min(v));
520 # endif
521 }
522 __forceinline int reduce_max(const ssei &v)
523 {
524 # ifdef __KERNEL_NEON__
525  return vmaxvq_s32(v);
526 # else
527  return extract<0>(vreduce_max(v));
528 # endif
529 }
530 __forceinline int reduce_add(const ssei &v)
531 {
532 # ifdef __KERNEL_NEON__
533  return vaddvq_s32(v);
534 # else
535  return extract<0>(vreduce_add(v));
536 # endif
537 }
538 
539 __forceinline uint32_t select_min(const ssei &v)
540 {
541  return __bsf(movemask(v == vreduce_min(v)));
542 }
543 __forceinline uint32_t select_max(const ssei &v)
544 {
545  return __bsf(movemask(v == vreduce_max(v)));
546 }
547 
548 __forceinline uint32_t select_min(const sseb &valid, const ssei &v)
549 {
550  const ssei a = select(valid, v, ssei((int)pos_inf));
551  return __bsf(movemask(valid & (a == vreduce_min(a))));
552 }
553 __forceinline uint32_t select_max(const sseb &valid, const ssei &v)
554 {
555  const ssei a = select(valid, v, ssei((int)neg_inf));
556  return __bsf(movemask(valid & (a == vreduce_max(a))));
557 }
558 
559 # else
560 
561 __forceinline int ssei_min(int a, int b)
562 {
563  return (a < b) ? a : b;
564 }
565 __forceinline int ssei_max(int a, int b)
566 {
567  return (a > b) ? a : b;
568 }
569 __forceinline int reduce_min(const ssei &v)
570 {
571  return ssei_min(ssei_min(v[0], v[1]), ssei_min(v[2], v[3]));
572 }
573 __forceinline int reduce_max(const ssei &v)
574 {
575  return ssei_max(ssei_max(v[0], v[1]), ssei_max(v[2], v[3]));
576 }
577 __forceinline int reduce_add(const ssei &v)
578 {
579  return v[0] + v[1] + v[2] + v[3];
580 }
581 
582 # endif
583 
587 
588 __forceinline ssei load4i(const void *const a)
589 {
590  return _mm_load_si128((__m128i *)a);
591 }
592 
593 __forceinline void store4i(void *ptr, const ssei &v)
594 {
595  _mm_store_si128((__m128i *)ptr, v);
596 }
597 
598 __forceinline void storeu4i(void *ptr, const ssei &v)
599 {
600  _mm_storeu_si128((__m128i *)ptr, v);
601 }
602 
603 __forceinline void store4i(const sseb &mask, void *ptr, const ssei &i)
604 {
605 # if defined(__KERNEL_AVX__)
606  _mm_maskstore_ps((float *)ptr, (__m128i)mask, _mm_castsi128_ps(i));
607 # else
608  *(ssei *)ptr = select(mask, i, *(ssei *)ptr);
609 # endif
610 }
611 
612 __forceinline ssei load4i_nt(void *ptr)
613 {
614 # if defined(__KERNEL_SSE41__)
615  return _mm_stream_load_si128((__m128i *)ptr);
616 # else
617  return _mm_load_si128((__m128i *)ptr);
618 # endif
619 }
620 
621 __forceinline void store4i_nt(void *ptr, const ssei &v)
622 {
623 # if defined(__KERNEL_SSE41__)
624  _mm_stream_ps((float *)ptr, _mm_castsi128_ps(v));
625 # else
626  _mm_store_si128((__m128i *)ptr, v);
627 # endif
628 }
629 
633 
634 ccl_device_inline void print_ssei(const char *label, const ssei &a)
635 {
636  printf("%s: %df %df %df %d\n", label, a[0], a[1], a[2], a[3]);
637 }
638 
639 #endif
640 
642 
643 #endif
void BLI_kdtree_nd_() insert(KDTree *tree, int index, const float co[KD_DIMS]) ATTR_NONNULL(1
struct Mask Mask
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint i1
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei GLsizei GLenum type _GL_VOID_RET _GL_VOID GLsizei GLenum GLenum const void *pixels _GL_VOID_RET _GL_VOID const void *pointer _GL_VOID_RET _GL_VOID GLdouble v _GL_VOID_RET _GL_VOID GLfloat v _GL_VOID_RET _GL_VOID GLint GLint i2 _GL_VOID_RET _GL_VOID GLint j _GL_VOID_RET _GL_VOID GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble GLdouble GLdouble zFar _GL_VOID_RET _GL_UINT GLdouble *equation _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLenum GLfloat *v _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLfloat *values _GL_VOID_RET _GL_VOID GLushort *values _GL_VOID_RET _GL_VOID GLenum GLfloat *params _GL_VOID_RET _GL_VOID GLenum GLdouble *params _GL_VOID_RET _GL_VOID GLenum GLint *params _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_BOOL GLfloat param _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET 
_GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLushort pattern _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLint GLdouble GLdouble GLint GLint const GLdouble *points _GL_VOID_RET _GL_VOID GLdouble GLdouble u2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLint GLdouble GLdouble v2 _GL_VOID_RET _GL_VOID GLenum GLfloat param _GL_VOID_RET _GL_VOID GLenum GLint param _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLdouble GLdouble nz _GL_VOID_RET _GL_VOID GLfloat GLfloat nz _GL_VOID_RET _GL_VOID GLint GLint nz _GL_VOID_RET _GL_VOID GLshort GLshort nz _GL_VOID_RET _GL_VOID GLsizei const void *pointer _GL_VOID_RET _GL_VOID GLsizei const GLfloat *values _GL_VOID_RET _GL_VOID GLsizei const GLushort *values _GL_VOID_RET _GL_VOID GLint param _GL_VOID_RET _GL_VOID const GLuint const GLclampf *priorities _GL_VOID_RET _GL_VOID GLdouble y _GL_VOID_RET _GL_VOID GLfloat y _GL_VOID_RET _GL_VOID GLint y _GL_VOID_RET _GL_VOID GLshort y _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLfloat GLfloat z _GL_VOID_RET _GL_VOID GLint GLint z _GL_VOID_RET _GL_VOID GLshort GLshort z _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble w _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat w _GL_VOID_RET _GL_VOID GLint GLint GLint w _GL_VOID_RET _GL_VOID GLshort GLshort GLshort w _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble y2 _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat y2 _GL_VOID_RET _GL_VOID GLint GLint GLint y2 _GL_VOID_RET _GL_VOID GLshort GLshort GLshort y2 _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLdouble GLdouble z _GL_VOID_RET _GL_VOID GLuint *buffer _GL_VOID_RET _GL_VOID GLdouble t _GL_VOID_RET _GL_VOID GLfloat t _GL_VOID_RET _GL_VOID GLint t _GL_VOID_RET _GL_VOID GLshort t _GL_VOID_RET _GL_VOID GLdouble t
Group RGB to Bright Vector Camera Vector Combine Material Light Line Style Layer Add Ambient Diffuse Glossy Refraction Transparent Toon Principled Hair Volume Principled Light Particle Volume Image Sky Noise Wave Voronoi Brick Texture Vector Combine Vertex Separate Vector White RGB Map Separate Set Z Dilate Combine Combine Color Channel Split ID Combine Luminance Directional Alpha Distance Hue Movie Ellipse Bokeh View Corner Anti Mix RGB Hue Separate TEX_NODE_PROC TEX_NODE_PROC TEX_NODE_PROC TEX_NODE_PROC TEX_NODE_PROC Boolean Random Float
ATTR_WARN_UNUSED_RESULT const BMVert * v
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition: btDbvt.cpp:52
btGeneric6DofConstraint & operator=(btGeneric6DofConstraint &other)
SIMD_FORCE_INLINE btVector3 & operator[](int i)
Get a mutable reference to a row of the matrix as a vector.
Definition: btMatrix3x3.h:157
__forceinline float extract(const int4 &b)
Definition: bvh_binning.cpp:45
const char * label
CCL_NAMESPACE_BEGIN PackFlags operator|=(PackFlags &pack_flags, uint32_t value)
Definition: geometry.cpp:49
std::ostream & operator<<(std::ostream &stream, const GeometrySet &geometry_set)
bool operator==(const GeometrySet &UNUSED(a), const GeometrySet &UNUSED(b))
static void shuffle(float2 points[], int size, int rng_seed)
Definition: jitter.cpp:243
#define ccl_device_inline
#define CCL_NAMESPACE_END
U * cast(T *in)
Definition: Cast.h:27
Matrix< T, M, N > operator-(const Matrix< T, M, N > &m1, const Matrix< T, M, N > &m2)
Definition: VecMat.h:922
Vec< T, N > operator*(const typename Vec< T, N >::value_type r, const Vec< T, N > &v)
Definition: VecMat.h:858
static unsigned c
Definition: RandGen.cpp:97
static unsigned a[3]
Definition: RandGen.cpp:92
GPUState operator^(const GPUState &a, const GPUState &b)
constexpr bool operator!=(StringRef a, StringRef b)
constexpr bool operator>=(StringRef a, StringRef b)
constexpr bool operator<(StringRef a, StringRef b)
constexpr bool operator<=(StringRef a, StringRef b)
constexpr bool operator>(StringRef a, StringRef b)
std::string operator+(StringRef a, StringRef b)
#define min(a, b)
Definition: sort.c:51
unsigned int uint32_t
Definition: stdint.h:83
signed int int32_t
Definition: stdint.h:80
float max
__forceinline uint32_t movemask(const avxb &a)
Definition: util_avxb.h:227
__forceinline const avxb operator&(const avxb &a, const avxb &b)
Binary Operators.
Definition: util_avxb.h:113
__forceinline const avxb operator^=(avxb &a, const avxb &b)
Definition: util_avxb.h:138
__forceinline const avxb operator&=(avxb &a, const avxb &b)
Assignment Operators.
Definition: util_avxb.h:130
__forceinline const avxb unpacklo(const avxb &a, const avxb &b)
Movement/Shifting/Shuffling Functions.
Definition: util_avxb.h:180
__forceinline const avxb operator|(const avxb &a, const avxb &b)
Definition: util_avxb.h:117
__forceinline const avxb select(const avxb &m, const avxb &t, const avxb &f)
Definition: util_avxb.h:167
__forceinline const avxb unpackhi(const avxb &a, const avxb &b)
Definition: util_avxb.h:184
__forceinline float extract< 0 >(const avxf &a)
Definition: util_avxf.h:272
__forceinline avxi & operator-=(avxi &a, const avxi &b)
Definition: util_avxi.h:407
__forceinline int reduce_max(const avxi &v)
Definition: util_avxi.h:705
__forceinline avxi & operator<<=(avxi &a, const int32_t b)
Definition: util_avxi.h:452
__forceinline uint32_t select_max(const avxi &v)
Definition: util_avxi.h:718
__forceinline const avxi vreduce_add(const avxi &v)
Definition: util_avxi.h:695
__forceinline const avxi abs(const avxi &a)
Definition: util_avxi.h:186
__forceinline int reduce_min(const avxi &v)
Definition: util_avxi.h:701
__forceinline const avxi operator>>(const avxi &a, const int32_t n)
Definition: util_avxi.h:339
__forceinline const avxi vreduce_min(const avxi &v)
Definition: util_avxi.h:665
__forceinline avxi & operator>>=(avxi &a, const int32_t b)
Definition: util_avxi.h:456
__forceinline avxi & operator+=(avxi &a, const avxi &b)
Assignment Operators.
Definition: util_avxi.h:398
__forceinline const avxi vreduce_max(const avxi &v)
Definition: util_avxi.h:680
__forceinline const avxi srl(const avxi &a, const int32_t b)
Definition: util_avxi.h:348
__forceinline int reduce_add(const avxi &v)
Definition: util_avxi.h:709
__forceinline uint32_t select_min(const avxi &v)
Definition: util_avxi.h:714
__forceinline const avxi sra(const avxi &a, const int32_t b)
Definition: util_avxi.h:344
__forceinline avxi & operator*=(avxi &a, const avxi &b)
Definition: util_avxi.h:416
#define __forceinline
Definition: util_defines.h:71
ccl_device_inline float4 mask(const int4 &mask, const float4 &a)
CCL_NAMESPACE_BEGIN __forceinline uint32_t __bsf(const uint32_t x)
Definition: util_simd.h:367
PointerRNA * ptr
Definition: wm_files.c:3157