Blender V2.93
util_avxi.h
/*
 * Copyright 2009-2013 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __UTIL_AVXI_H__
#define __UTIL_AVXI_H__

CCL_NAMESPACE_BEGIN

struct avxb;

struct avxi {
  typedef avxb Mask;  // mask type for us
  enum { size = 8 };  // number of SIMD elements
  union {  // data
    __m256i m256;
#if !defined(__KERNEL_AVX2__)
    struct {
      __m128i l, h;
    };
#endif
    int32_t v[8];
  };
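  /* Note: the l/h halves (present only without __KERNEL_AVX2__) and the v[] lanes
   * alias the same 256 bits as m256, so the non-AVX2 fallbacks below can operate
   * on the two 128-bit halves directly. */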

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline avxi()
  {
  }
  __forceinline avxi(const avxi &a)
  {
    m256 = a.m256;
  }
  __forceinline avxi &operator=(const avxi &a)
  {
    m256 = a.m256;
    return *this;
  }

  __forceinline avxi(const __m256i a) : m256(a)
  {
  }
  __forceinline operator const __m256i &(void)const
  {
    return m256;
  }
  __forceinline operator __m256i &(void)
  {
    return m256;
  }

  __forceinline explicit avxi(const ssei &a)
      : m256(_mm256_insertf128_si256(_mm256_castsi128_si256(a), a, 1))
  {
  }
  __forceinline avxi(const ssei &a, const ssei &b)
      : m256(_mm256_insertf128_si256(_mm256_castsi128_si256(a), b, 1))
  {
  }
#if defined(__KERNEL_AVX2__)
  __forceinline avxi(const __m128i &a, const __m128i &b)
      : m256(_mm256_insertf128_si256(_mm256_castsi128_si256(a), b, 1))
  {
  }
#else
  __forceinline avxi(const __m128i &a, const __m128i &b) : l(a), h(b)
  {
  }
#endif
  __forceinline explicit avxi(const int32_t *const a)
      : m256(_mm256_castps_si256(_mm256_loadu_ps((const float *)a)))
  {
  }
  __forceinline avxi(int32_t a) : m256(_mm256_set1_epi32(a))
  {
  }
  __forceinline avxi(int32_t a, int32_t b) : m256(_mm256_set_epi32(b, a, b, a, b, a, b, a))
  {
  }
  __forceinline avxi(int32_t a, int32_t b, int32_t c, int32_t d)
      : m256(_mm256_set_epi32(d, c, b, a, d, c, b, a))
  {
  }
  __forceinline avxi(
      int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f, int32_t g, int32_t h)
      : m256(_mm256_set_epi32(h, g, f, e, d, c, b, a))
  {
  }
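  /* _mm256_set_epi32 lists its arguments from the highest lane down, so these
   * constructors put `a` in lane 0 and repeat the 2- and 4-element patterns
   * across both 128-bit halves. */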

  __forceinline explicit avxi(const __m256 a) : m256(_mm256_cvtps_epi32(a))
  {
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Constants
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline avxi(ZeroTy) : m256(_mm256_setzero_si256())
  {
  }
#if defined(__KERNEL_AVX2__)
  __forceinline avxi(OneTy) : m256(_mm256_set1_epi32(1))
  {
  }
  __forceinline avxi(PosInfTy) : m256(_mm256_set1_epi32(pos_inf))
  {
  }
  __forceinline avxi(NegInfTy) : m256(_mm256_set1_epi32(neg_inf))
  {
  }
#else
  __forceinline avxi(OneTy) : m256(_mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1))
  {
  }
  __forceinline avxi(PosInfTy)
      : m256(_mm256_set_epi32(
            pos_inf, pos_inf, pos_inf, pos_inf, pos_inf, pos_inf, pos_inf, pos_inf))
  {
  }
  __forceinline avxi(NegInfTy)
      : m256(_mm256_set_epi32(
            neg_inf, neg_inf, neg_inf, neg_inf, neg_inf, neg_inf, neg_inf, neg_inf))
  {
  }
#endif
  __forceinline avxi(StepTy) : m256(_mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0))
  {
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const int32_t &operator[](const size_t i) const
  {
    assert(i < 8);
    return v[i];
  }
  __forceinline int32_t &operator[](const size_t i)
  {
    assert(i < 8);
    return v[i];
  }
};

////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline const avxi cast(const __m256 &a)
{
  return _mm256_castps_si256(a);
}
__forceinline const avxi operator+(const avxi &a)
{
  return a;
}
#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator-(const avxi &a)
{
  return _mm256_sub_epi32(_mm256_setzero_si256(), a.m256);
}
__forceinline const avxi abs(const avxi &a)
{
  return _mm256_abs_epi32(a.m256);
}
#else
__forceinline const avxi operator-(const avxi &a)
{
  return avxi(_mm_sub_epi32(_mm_setzero_si128(), a.l), _mm_sub_epi32(_mm_setzero_si128(), a.h));
}
__forceinline const avxi abs(const avxi &a)
{
  return avxi(_mm_abs_epi32(a.l), _mm_abs_epi32(a.h));
}
#endif

////////////////////////////////////////////////////////////////////////////////
/// Binary Operators
////////////////////////////////////////////////////////////////////////////////

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator+(const avxi &a, const avxi &b)
{
  return _mm256_add_epi32(a.m256, b.m256);
}
#else
__forceinline const avxi operator+(const avxi &a, const avxi &b)
{
  return avxi(_mm_add_epi32(a.l, b.l), _mm_add_epi32(a.h, b.h));
}
#endif
__forceinline const avxi operator+(const avxi &a, const int32_t b)
{
  return a + avxi(b);
}
__forceinline const avxi operator+(const int32_t a, const avxi &b)
{
  return avxi(a) + b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator-(const avxi &a, const avxi &b)
{
  return _mm256_sub_epi32(a.m256, b.m256);
}
#else
__forceinline const avxi operator-(const avxi &a, const avxi &b)
{
  return avxi(_mm_sub_epi32(a.l, b.l), _mm_sub_epi32(a.h, b.h));
}
#endif
__forceinline const avxi operator-(const avxi &a, const int32_t b)
{
  return a - avxi(b);
}
__forceinline const avxi operator-(const int32_t a, const avxi &b)
{
  return avxi(a) - b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator*(const avxi &a, const avxi &b)
{
  return _mm256_mullo_epi32(a.m256, b.m256);
}
#else
__forceinline const avxi operator*(const avxi &a, const avxi &b)
{
  return avxi(_mm_mullo_epi32(a.l, b.l), _mm_mullo_epi32(a.h, b.h));
}
#endif
__forceinline const avxi operator*(const avxi &a, const int32_t b)
{
  return a * avxi(b);
}
__forceinline const avxi operator*(const int32_t a, const avxi &b)
{
  return avxi(a) * b;
}

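/* Note: without AVX2 there are no 256-bit integer logical instructions, so the
 * bitwise operators below fall back to the float domain (_mm256_and_ps and its
 * siblings); the bit pattern of the result is identical. */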
#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator&(const avxi &a, const avxi &b)
{
  return _mm256_and_si256(a.m256, b.m256);
}
#else
__forceinline const avxi operator&(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
}
#endif
__forceinline const avxi operator&(const avxi &a, const int32_t b)
{
  return a & avxi(b);
}
__forceinline const avxi operator&(const int32_t a, const avxi &b)
{
  return avxi(a) & b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator|(const avxi &a, const avxi &b)
{
  return _mm256_or_si256(a.m256, b.m256);
}
#else
__forceinline const avxi operator|(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
}
#endif
__forceinline const avxi operator|(const avxi &a, const int32_t b)
{
  return a | avxi(b);
}
__forceinline const avxi operator|(const int32_t a, const avxi &b)
{
  return avxi(a) | b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator^(const avxi &a, const avxi &b)
{
  return _mm256_xor_si256(a.m256, b.m256);
}
#else
__forceinline const avxi operator^(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
}
#endif
__forceinline const avxi operator^(const avxi &a, const int32_t b)
{
  return a ^ avxi(b);
}
__forceinline const avxi operator^(const int32_t a, const avxi &b)
{
  return avxi(a) ^ b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi operator<<(const avxi &a, const int32_t n)
{
  return _mm256_slli_epi32(a.m256, n);
}
__forceinline const avxi operator>>(const avxi &a, const int32_t n)
{
  return _mm256_srai_epi32(a.m256, n);
}

__forceinline const avxi sra(const avxi &a, const int32_t b)
{
  return _mm256_srai_epi32(a.m256, b);
}
__forceinline const avxi srl(const avxi &a, const int32_t b)
{
  return _mm256_srli_epi32(a.m256, b);
}
#else
__forceinline const avxi operator<<(const avxi &a, const int32_t n)
{
  return avxi(_mm_slli_epi32(a.l, n), _mm_slli_epi32(a.h, n));
}
__forceinline const avxi operator>>(const avxi &a, const int32_t n)
{
  return avxi(_mm_srai_epi32(a.l, n), _mm_srai_epi32(a.h, n));
}

__forceinline const avxi sra(const avxi &a, const int32_t b)
{
  return avxi(_mm_srai_epi32(a.l, b), _mm_srai_epi32(a.h, b));
}
__forceinline const avxi srl(const avxi &a, const int32_t b)
{
  return avxi(_mm_srli_epi32(a.l, b), _mm_srli_epi32(a.h, b));
}
#endif
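/* operator>> and sra() are arithmetic right shifts (sign-extending, srai);
 * srl() is the logical, zero-filling variant (srli). */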

#if defined(__KERNEL_AVX2__)
__forceinline const avxi min(const avxi &a, const avxi &b)
{
  return _mm256_min_epi32(a.m256, b.m256);
}
#else
__forceinline const avxi min(const avxi &a, const avxi &b)
{
  return avxi(_mm_min_epi32(a.l, b.l), _mm_min_epi32(a.h, b.h));
}
#endif
__forceinline const avxi min(const avxi &a, const int32_t b)
{
  return min(a, avxi(b));
}
__forceinline const avxi min(const int32_t a, const avxi &b)
{
  return min(avxi(a), b);
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxi max(const avxi &a, const avxi &b)
{
  return _mm256_max_epi32(a.m256, b.m256);
}
#else
__forceinline const avxi max(const avxi &a, const avxi &b)
{
  return avxi(_mm_max_epi32(a.l, b.l), _mm_max_epi32(a.h, b.h));
}
#endif
__forceinline const avxi max(const avxi &a, const int32_t b)
{
  return max(a, avxi(b));
}
__forceinline const avxi max(const int32_t a, const avxi &b)
{
  return max(avxi(a), b);
}

////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline avxi &operator+=(avxi &a, const avxi &b)
{
  return a = a + b;
}
__forceinline avxi &operator+=(avxi &a, const int32_t b)
{
  return a = a + b;
}

__forceinline avxi &operator-=(avxi &a, const avxi &b)
{
  return a = a - b;
}
__forceinline avxi &operator-=(avxi &a, const int32_t b)
{
  return a = a - b;
}

__forceinline avxi &operator*=(avxi &a, const avxi &b)
{
  return a = a * b;
}
__forceinline avxi &operator*=(avxi &a, const int32_t b)
{
  return a = a * b;
}

__forceinline avxi &operator&=(avxi &a, const avxi &b)
{
  return a = a & b;
}
__forceinline avxi &operator&=(avxi &a, const int32_t b)
{
  return a = a & b;
}

__forceinline avxi &operator|=(avxi &a, const avxi &b)
{
  return a = a | b;
}
__forceinline avxi &operator|=(avxi &a, const int32_t b)
{
  return a = a | b;
}

__forceinline avxi &operator^=(avxi &a, const avxi &b)
{
  return a = a ^ b;
}
__forceinline avxi &operator^=(avxi &a, const int32_t b)
{
  return a = a ^ b;
}

__forceinline avxi &operator<<=(avxi &a, const int32_t b)
{
  return a = a << b;
}
__forceinline avxi &operator>>=(avxi &a, const int32_t b)
{
  return a = a >> b;
}

////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators + Select
////////////////////////////////////////////////////////////////////////////////

#if defined(__KERNEL_AVX2__)
__forceinline const avxb operator==(const avxi &a, const avxi &b)
{
  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(a.m256, b.m256));
}
#else
__forceinline const avxb operator==(const avxi &a, const avxi &b)
{
  return avxb(_mm_castsi128_ps(_mm_cmpeq_epi32(a.l, b.l)),
              _mm_castsi128_ps(_mm_cmpeq_epi32(a.h, b.h)));
}
#endif
__forceinline const avxb operator==(const avxi &a, const int32_t b)
{
  return a == avxi(b);
}
__forceinline const avxb operator==(const int32_t a, const avxi &b)
{
  return avxi(a) == b;
}

__forceinline const avxb operator!=(const avxi &a, const avxi &b)
{
  return !(a == b);
}
__forceinline const avxb operator!=(const avxi &a, const int32_t b)
{
  return a != avxi(b);
}
__forceinline const avxb operator!=(const int32_t a, const avxi &b)
{
  return avxi(a) != b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxb operator<(const avxi &a, const avxi &b)
{
  return _mm256_castsi256_ps(_mm256_cmpgt_epi32(b.m256, a.m256));
}
#else
__forceinline const avxb operator<(const avxi &a, const avxi &b)
{
  return avxb(_mm_castsi128_ps(_mm_cmplt_epi32(a.l, b.l)),
              _mm_castsi128_ps(_mm_cmplt_epi32(a.h, b.h)));
}
#endif
__forceinline const avxb operator<(const avxi &a, const int32_t b)
{
  return a < avxi(b);
}
__forceinline const avxb operator<(const int32_t a, const avxi &b)
{
  return avxi(a) < b;
}

__forceinline const avxb operator>=(const avxi &a, const avxi &b)
{
  return !(a < b);
}
__forceinline const avxb operator>=(const avxi &a, const int32_t b)
{
  return a >= avxi(b);
}
__forceinline const avxb operator>=(const int32_t a, const avxi &b)
{
  return avxi(a) >= b;
}

#if defined(__KERNEL_AVX2__)
__forceinline const avxb operator>(const avxi &a, const avxi &b)
{
  return _mm256_castsi256_ps(_mm256_cmpgt_epi32(a.m256, b.m256));
}
#else
__forceinline const avxb operator>(const avxi &a, const avxi &b)
{
  return avxb(_mm_castsi128_ps(_mm_cmpgt_epi32(a.l, b.l)),
              _mm_castsi128_ps(_mm_cmpgt_epi32(a.h, b.h)));
}
#endif
__forceinline const avxb operator>(const avxi &a, const int32_t b)
{
  return a > avxi(b);
}
__forceinline const avxb operator>(const int32_t a, const avxi &b)
{
  return avxi(a) > b;
}

__forceinline const avxb operator<=(const avxi &a, const avxi &b)
{
  return !(a > b);
}
__forceinline const avxb operator<=(const avxi &a, const int32_t b)
{
  return a <= avxi(b);
}
__forceinline const avxb operator<=(const int32_t a, const avxi &b)
{
  return avxi(a) <= b;
}

__forceinline const avxi select(const avxb &m, const avxi &t, const avxi &f)
{
  return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(f), _mm256_castsi256_ps(t), m));
}
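/* select() returns t where the mask lane is set and f otherwise. The comparison
 * operators above produce float-typed masks (avxb), so the blend is done with
 * _mm256_blendv_ps on the reinterpreted integer data. */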

////////////////////////////////////////////////////////////////////////////////
/// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

#if defined(__KERNEL_AVX2__)
__forceinline avxi unpacklo(const avxi &a, const avxi &b)
{
  return _mm256_unpacklo_epi32(a.m256, b.m256);
}
__forceinline avxi unpackhi(const avxi &a, const avxi &b)
{
  return _mm256_unpackhi_epi32(a.m256, b.m256);
}
#else
__forceinline avxi unpacklo(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_unpacklo_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
}
__forceinline avxi unpackhi(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_unpackhi_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b)));
}
#endif

template<size_t i> __forceinline const avxi shuffle(const avxi &a)
{
  return _mm256_castps_si256(_mm256_permute_ps(_mm256_castsi256_ps(a), _MM_SHUFFLE(i, i, i, i)));
}

template<size_t i0, size_t i1> __forceinline const avxi shuffle(const avxi &a)
{
  return _mm256_permute2f128_si256(a, a, (i1 << 4) | (i0 << 0));
}

template<size_t i0, size_t i1> __forceinline const avxi shuffle(const avxi &a, const avxi &b)
{
  return _mm256_permute2f128_si256(a, b, (i1 << 4) | (i0 << 0));
}

template<size_t i0, size_t i1, size_t i2, size_t i3>
__forceinline const avxi shuffle(const avxi &a)
{
  return _mm256_castps_si256(
      _mm256_permute_ps(_mm256_castsi256_ps(a), _MM_SHUFFLE(i3, i2, i1, i0)));
}
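/* The two-index shuffle variants above select whole 128-bit halves via
 * _mm256_permute2f128_si256; the four-index variants permute 32-bit lanes
 * within each 128-bit half, following the usual _MM_SHUFFLE convention. */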

template<size_t i0, size_t i1, size_t i2, size_t i3>
__forceinline const avxi shuffle(const avxi &a, const avxi &b)
{
  return _mm256_castps_si256(_mm256_shuffle_ps(
      _mm256_castsi256_ps(a), _mm256_castsi256_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
}

template<> __forceinline const avxi shuffle<0, 0, 2, 2>(const avxi &b)
{
  return _mm256_castps_si256(_mm256_moveldup_ps(_mm256_castsi256_ps(b)));
}
template<> __forceinline const avxi shuffle<1, 1, 3, 3>(const avxi &b)
{
  return _mm256_castps_si256(_mm256_movehdup_ps(_mm256_castsi256_ps(b)));
}
template<> __forceinline const avxi shuffle<0, 1, 0, 1>(const avxi &b)
{
  return _mm256_castps_si256(
      _mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(_mm256_castsi256_ps(b)))));
}

__forceinline const avxi broadcast(const int *ptr)
{
  return _mm256_castps_si256(_mm256_broadcast_ss((const float *)ptr));
}
template<size_t i> __forceinline const avxi insert(const avxi &a, const ssei &b)
{
  return _mm256_insertf128_si256(a, b, i);
}
template<size_t i> __forceinline const ssei extract(const avxi &a)
{
  return _mm256_extractf128_si256(a, i);
}

////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////

__forceinline const avxi vreduce_min2(const avxi &v)
{
  return min(v, shuffle<1, 0, 3, 2>(v));
}
__forceinline const avxi vreduce_min4(const avxi &v)
{
  avxi v1 = vreduce_min2(v);
  return min(v1, shuffle<2, 3, 0, 1>(v1));
}
__forceinline const avxi vreduce_min(const avxi &v)
{
  avxi v1 = vreduce_min4(v);
  return min(v1, shuffle<1, 0>(v1));
}

__forceinline const avxi vreduce_max2(const avxi &v)
{
  return max(v, shuffle<1, 0, 3, 2>(v));
}
__forceinline const avxi vreduce_max4(const avxi &v)
{
  avxi v1 = vreduce_max2(v);
  return max(v1, shuffle<2, 3, 0, 1>(v1));
}
__forceinline const avxi vreduce_max(const avxi &v)
{
  avxi v1 = vreduce_max4(v);
  return max(v1, shuffle<1, 0>(v1));
}

__forceinline const avxi vreduce_add2(const avxi &v)
{
  return v + shuffle<1, 0, 3, 2>(v);
}
__forceinline const avxi vreduce_add4(const avxi &v)
{
  avxi v1 = vreduce_add2(v);
  return v1 + shuffle<2, 3, 0, 1>(v1);
}
__forceinline const avxi vreduce_add(const avxi &v)
{
  avxi v1 = vreduce_add4(v);
  return v1 + shuffle<1, 0>(v1);
}
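/* Each vreduce_* step folds lanes pairwise through shuffles until every lane
 * holds the reduced value; reduce_min/max/add below then read it back from
 * lane 0 as a scalar. */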

__forceinline int reduce_min(const avxi &v)
{
  return extract<0>(extract<0>(vreduce_min(v)));
}
__forceinline int reduce_max(const avxi &v)
{
  return extract<0>(extract<0>(vreduce_max(v)));
}
__forceinline int reduce_add(const avxi &v)
{
  return extract<0>(extract<0>(vreduce_add(v)));
}

__forceinline uint32_t select_min(const avxi &v)
{
  return __bsf(movemask(v == vreduce_min(v)));
}
__forceinline uint32_t select_max(const avxi &v)
{
  return __bsf(movemask(v == vreduce_max(v)));
}

__forceinline uint32_t select_min(const avxb &valid, const avxi &v)
{
  const avxi a = select(valid, v, avxi(pos_inf));
  return __bsf(movemask(valid & (a == vreduce_min(a))));
}
__forceinline uint32_t select_max(const avxb &valid, const avxi &v)
{
  const avxi a = select(valid, v, avxi(neg_inf));
  return __bsf(movemask(valid & (a == vreduce_max(a))));
}
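/* select_min/select_max return the index of the first lane holding the extreme
 * value (__bsf on the comparison mask); the masked variants first replace
 * invalid lanes with pos_inf/neg_inf so they can never win the reduction. */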

////////////////////////////////////////////////////////////////////////////////
/// Output Operators
////////////////////////////////////////////////////////////////////////////////

ccl_device_inline void print_avxi(const char *label, const avxi &a)
{
  printf("%s: %d %d %d %d %d %d %d %d\n", label, a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
}
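/* Example usage (illustrative sketch; assumes this header is reached through the
 * regular Cycles SIMD includes, so ssei/avxb and constants such as `step` from
 * util_simd.h are available):
 *
 *   avxi idx(step);            // lanes 0, 1, 2, ..., 7
 *   avxi odd = idx * 2 + 1;    // per-lane arithmetic: 1, 3, 5, ..., 15
 *   int hi = reduce_max(odd);  // horizontal maximum -> 15
 *   print_avxi("odd", odd);    // debug print of all 8 lanes
 */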

CCL_NAMESPACE_END

#endif