// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "../math/math.h"

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
  /* 4-wide SSE unsigned integer type */
  template<>
  struct vuint<4>
  {
    ALIGNED_STRUCT_(16);

    typedef vboolf4 Bool;
    typedef vuint4  Int;
    typedef vfloat4 Float;

    enum  { size = 4 }; // number of SIMD elements
    union { __m128i v; unsigned int i[4]; }; // data

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vuint() {}
    __forceinline vuint(const vuint4& a) { v = a.v; }
    __forceinline vuint4& operator =(const vuint4& a) { v = a.v; return *this; }

    __forceinline vuint(const __m128i a) : v(a) {}
    __forceinline operator const __m128i&() const { return v; }
    __forceinline operator       __m128i&()       { return v; }

    __forceinline vuint(unsigned int a) : v(_mm_set1_epi32(a)) {}
    __forceinline vuint(unsigned int a, unsigned int b, unsigned int c, unsigned int d) : v(_mm_set_epi32(d, c, b, a)) {}

#if defined(__AVX512VL__)
    __forceinline explicit vuint(__m128 a) : v(_mm_cvtps_epu32(a)) {}
#endif

#if defined(__AVX512VL__)
    __forceinline explicit vuint(const vboolf4& a) : v(_mm_movm_epi32(a)) {}
#else
    __forceinline explicit vuint(const vboolf4& a) : v(_mm_castps_si128((__m128)a)) {}
#endif
    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vuint(ZeroTy)      : v(_mm_setzero_si128()) {}
    __forceinline vuint(OneTy)       : v(_mm_set1_epi32(1)) {}
    __forceinline vuint(PosInfTy)    : v(_mm_set1_epi32(unsigned(pos_inf))) {}
    __forceinline vuint(StepTy)      : v(_mm_set_epi32(3, 2, 1, 0)) {}
    __forceinline vuint(TrueTy)      { v = _mm_cmpeq_epi32(v,v); }
    __forceinline vuint(UndefinedTy) : v(_mm_castps_si128(_mm_undefined_ps())) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vuint4 load (const void* a) { return _mm_load_si128((__m128i*)a); }
    static __forceinline vuint4 loadu(const void* a) { return _mm_loadu_si128((__m128i*)a); }

    static __forceinline void store (void* ptr, const vuint4& v) { _mm_store_si128((__m128i*)ptr,v); }
    static __forceinline void storeu(void* ptr, const vuint4& v) { _mm_storeu_si128((__m128i*)ptr,v); }

#if defined(__AVX512VL__)
    static __forceinline vuint4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_epi32 (_mm_setzero_si128(),mask,ptr); }
    static __forceinline vuint4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_epi32(_mm_setzero_si128(),mask,ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vuint4& v) { _mm_mask_store_epi32 (ptr,mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& v) { _mm_mask_storeu_epi32(ptr,mask,v); }
#elif defined(__AVX__)
    static __forceinline vuint4 load (const vbool4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a,mask)); }
    static __forceinline vuint4 loadu(const vbool4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a,mask)); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vuint4& i) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i)); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& i) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i)); }
#else
    static __forceinline vuint4 load (const vbool4& mask, const void* a) { return _mm_and_si128(_mm_load_si128 ((__m128i*)a),mask); }
    static __forceinline vuint4 loadu(const vbool4& mask, const void* a) { return _mm_and_si128(_mm_loadu_si128((__m128i*)a),mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vuint4& i) { store (ptr,select(mask,i,load (ptr))); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& i) { storeu(ptr,select(mask,i,loadu(ptr))); }
#endif
#if defined(__aarch64__)
    static __forceinline vuint4 load(const unsigned char* ptr) {
      return _mm_load4epu8_epi32(((__m128i*)ptr));
    }
    static __forceinline vuint4 loadu(const unsigned char* ptr) {
      return _mm_load4epu8_epi32(((__m128i*)ptr));
    }
#elif defined(__SSE4_1__)
    static __forceinline vuint4 load(const unsigned char* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }
    static __forceinline vuint4 loadu(const unsigned char* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }
#endif

    static __forceinline vuint4 load(const unsigned short* ptr) {
#if defined(__aarch64__)
      return _mm_load4epu16_epi32(((__m128i*)ptr));
#elif defined (__SSE4_1__)
      return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
      return vuint4(ptr[0],ptr[1],ptr[2],ptr[3]);
#endif
    }

    static __forceinline vuint4 load_nt(void* ptr) {
#if (defined(__aarch64__)) || defined(__SSE4_1__)
      return _mm_stream_load_si128((__m128i*)ptr);
#else
      return _mm_load_si128((__m128i*)ptr);
#endif
    }

    static __forceinline void store_nt(void* ptr, const vuint4& v) {
#if !defined(__aarch64__) && defined(__SSE4_1__)
      _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
#else
      _mm_store_si128((__m128i*)ptr,v);
#endif
    }
    template<int scale = 4>
    static __forceinline vuint4 gather(const unsigned int* ptr, const vint4& index) {
#if defined(__AVX2__) && !defined(__aarch64__)
      return _mm_i32gather_epi32((const int*)ptr, index, scale);
#else
      return vuint4(
          *(unsigned int*)(((char*)ptr)+scale*index[0]),
          *(unsigned int*)(((char*)ptr)+scale*index[1]),
          *(unsigned int*)(((char*)ptr)+scale*index[2]),
          *(unsigned int*)(((char*)ptr)+scale*index[3]));
#endif
    }

    template<int scale = 4>
    static __forceinline vuint4 gather(const vboolf4& mask, const unsigned int* ptr, const vint4& index) {
      vuint4 r = zero;
#if defined(__AVX512VL__)
      return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
#elif defined(__AVX2__) && !defined(__aarch64__)
      return _mm_mask_i32gather_epi32(r, (const int*)ptr, index, mask, scale);
#else
      if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]);
      if (likely(mask[1])) r[1] = *(unsigned int*)(((char*)ptr)+scale*index[1]);
      if (likely(mask[2])) r[2] = *(unsigned int*)(((char*)ptr)+scale*index[2]);
      if (likely(mask[3])) r[3] = *(unsigned int*)(((char*)ptr)+scale*index[3]);
      return r;
#endif
    }
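
    // Illustrative usage sketch (not part of the original interface): each lane k
    // reads the 32-bit value at byte offset scale*index[k] from ptr, so with the
    // default scale of 4 the call below loads values[1], values[5], values[2] and
    // values[7] from a hypothetical 'values' array. The masked overload only
    // touches lanes whose mask bit is set and leaves the remaining lanes zero.
    //
    //   const unsigned int* values = /* assumed valid array of >= 8 elements */;
    //   vuint4 g = vuint4::gather<4>(values, vint4(1, 5, 2, 7));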
    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline const unsigned int& operator [](size_t index) const { assert(index < 4); return i[index]; }
    __forceinline       unsigned int& operator [](size_t index)       { assert(index < 4); return i[index]; }

    friend __forceinline vuint4 select(const vboolf4& m, const vuint4& t, const vuint4& f) {
#if defined(__AVX512VL__)
      return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t);
#elif defined(__SSE4_1__)
      return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
      return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#endif
    }
  };
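
  // Illustrative usage sketch (not part of the original header): builds two
  // vectors, compares them lane-wise, and uses the resulting mask with select();
  // where a mask lane is true the result takes that lane from the second
  // argument, otherwise from the third (values are hypothetical).
  //
  //   vuint4 a(1u, 2u, 3u, 4u);
  //   vuint4 b(4u);                       // broadcast constructor
  //   vuint4 c = select(a == b, a, b);    // per-lane choice driven by the mask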
  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 asBool(const vuint4& a) { return _mm_movepi32_mask(a); }
#else
  __forceinline vboolf4 asBool(const vuint4& a) { return _mm_castsi128_ps(a); }
#endif

  __forceinline vuint4 operator +(const vuint4& a) { return a; }
  __forceinline vuint4 operator -(const vuint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint4 operator +(const vuint4& a, const vuint4& b) { return _mm_add_epi32(a, b); }
  __forceinline vuint4 operator +(const vuint4& a, unsigned int b) { return a + vuint4(b); }
  __forceinline vuint4 operator +(unsigned int a, const vuint4& b) { return vuint4(a) + b; }

  __forceinline vuint4 operator -(const vuint4& a, const vuint4& b) { return _mm_sub_epi32(a, b); }
  __forceinline vuint4 operator -(const vuint4& a, unsigned int b) { return a - vuint4(b); }
  __forceinline vuint4 operator -(unsigned int a, const vuint4& b) { return vuint4(a) - b; }

//#if defined(__SSE4_1__)
//  __forceinline vuint4 operator *(const vuint4& a, const vuint4& b) { return _mm_mullo_epu32(a, b); }
//#else
//  __forceinline vuint4 operator *(const vuint4& a, const vuint4& b) { return vuint4(a[0]*b[0],a[1]*b[1],a[2]*b[2],a[3]*b[3]); }
//#endif
//  __forceinline vuint4 operator *(const vuint4& a, unsigned int b) { return a * vuint4(b); }
//  __forceinline vuint4 operator *(unsigned int a, const vuint4& b) { return vuint4(a) * b; }

  __forceinline vuint4 operator &(const vuint4& a, const vuint4& b) { return _mm_and_si128(a, b); }
  __forceinline vuint4 operator &(const vuint4& a, unsigned int b) { return a & vuint4(b); }
  __forceinline vuint4 operator &(unsigned int a, const vuint4& b) { return vuint4(a) & b; }

  __forceinline vuint4 operator |(const vuint4& a, const vuint4& b) { return _mm_or_si128(a, b); }
  __forceinline vuint4 operator |(const vuint4& a, unsigned int b) { return a | vuint4(b); }
  __forceinline vuint4 operator |(unsigned int a, const vuint4& b) { return vuint4(a) | b; }

  __forceinline vuint4 operator ^(const vuint4& a, const vuint4& b) { return _mm_xor_si128(a, b); }
  __forceinline vuint4 operator ^(const vuint4& a, unsigned int b) { return a ^ vuint4(b); }
  __forceinline vuint4 operator ^(unsigned int a, const vuint4& b) { return vuint4(a) ^ b; }

  __forceinline vuint4 operator <<(const vuint4& a, unsigned int n) { return _mm_slli_epi32(a, n); }
  __forceinline vuint4 operator >>(const vuint4& a, unsigned int n) { return _mm_srli_epi32(a, n); }

  __forceinline vuint4 sll (const vuint4& a, unsigned int b) { return _mm_slli_epi32(a, b); }
  __forceinline vuint4 sra (const vuint4& a, unsigned int b) { return _mm_srai_epi32(a, b); }
  __forceinline vuint4 srl (const vuint4& a, unsigned int b) { return _mm_srli_epi32(a, b); }
  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint4& operator +=(vuint4& a, const vuint4& b) { return a = a + b; }
  __forceinline vuint4& operator +=(vuint4& a, unsigned int b) { return a = a + b; }

  __forceinline vuint4& operator -=(vuint4& a, const vuint4& b) { return a = a - b; }
  __forceinline vuint4& operator -=(vuint4& a, unsigned int b) { return a = a - b; }

//#if defined(__SSE4_1__)
//  __forceinline vuint4& operator *=(vuint4& a, const vuint4& b) { return a = a * b; }
//  __forceinline vuint4& operator *=(vuint4& a, unsigned int b) { return a = a * b; }
//#endif

  __forceinline vuint4& operator &=(vuint4& a, const vuint4& b) { return a = a & b; }
  __forceinline vuint4& operator &=(vuint4& a, unsigned int b) { return a = a & b; }

  __forceinline vuint4& operator |=(vuint4& a, const vuint4& b) { return a = a | b; }
  __forceinline vuint4& operator |=(vuint4& a, unsigned int b) { return a = a | b; }

  __forceinline vuint4& operator <<=(vuint4& a, unsigned int b) { return a = a << b; }
  __forceinline vuint4& operator >>=(vuint4& a, unsigned int b) { return a = a >> b; }
  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 operator ==(const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf4 operator !=(const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_NE); }
  //__forceinline vboolf4 operator < (const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_LT); }
  //__forceinline vboolf4 operator >=(const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_GE); }
  //__forceinline vboolf4 operator > (const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_GT); }
  //__forceinline vboolf4 operator <=(const vuint4& a, const vuint4& b) { return _mm_cmp_epu32_mask(a,b,_MM_CMPINT_LE); }
#else
  __forceinline vboolf4 operator ==(const vuint4& a, const vuint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
  __forceinline vboolf4 operator !=(const vuint4& a, const vuint4& b) { return !(a == b); }
  //__forceinline vboolf4 operator < (const vuint4& a, const vuint4& b) { return _mm_castsi128_ps(_mm_cmplt_epu32(a, b)); }
  //__forceinline vboolf4 operator >=(const vuint4& a, const vuint4& b) { return !(a < b); }
  //__forceinline vboolf4 operator > (const vuint4& a, const vuint4& b) { return _mm_castsi128_ps(_mm_cmpgt_epu32(a, b)); }
  //__forceinline vboolf4 operator <=(const vuint4& a, const vuint4& b) { return !(a > b); }
#endif

  __forceinline vboolf4 operator ==(const vuint4& a, unsigned int b) { return a == vuint4(b); }
  __forceinline vboolf4 operator ==(unsigned int a, const vuint4& b) { return vuint4(a) == b; }

  __forceinline vboolf4 operator !=(const vuint4& a, unsigned int b) { return a != vuint4(b); }
  __forceinline vboolf4 operator !=(unsigned int a, const vuint4& b) { return vuint4(a) != b; }

  //__forceinline vboolf4 operator < (const vuint4& a, unsigned int b) { return a < vuint4(b); }
  //__forceinline vboolf4 operator < (unsigned int a, const vuint4& b) { return vuint4(a) < b; }

  //__forceinline vboolf4 operator >=(const vuint4& a, unsigned int b) { return a >= vuint4(b); }
  //__forceinline vboolf4 operator >=(unsigned int a, const vuint4& b) { return vuint4(a) >= b; }

  //__forceinline vboolf4 operator > (const vuint4& a, unsigned int b) { return a > vuint4(b); }
  //__forceinline vboolf4 operator > (unsigned int a, const vuint4& b) { return vuint4(a) > b; }

  //__forceinline vboolf4 operator <=(const vuint4& a, unsigned int b) { return a <= vuint4(b); }
  //__forceinline vboolf4 operator <=(unsigned int a, const vuint4& b) { return vuint4(a) <= b; }

  __forceinline vboolf4 eq(const vuint4& a, const vuint4& b) { return a == b; }
  __forceinline vboolf4 ne(const vuint4& a, const vuint4& b) { return a != b; }
  //__forceinline vboolf4 lt(const vuint4& a, const vuint4& b) { return a < b; }
  //__forceinline vboolf4 ge(const vuint4& a, const vuint4& b) { return a >= b; }
  //__forceinline vboolf4 gt(const vuint4& a, const vuint4& b) { return a > b; }
  //__forceinline vboolf4 le(const vuint4& a, const vuint4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboolf4 eq(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_NE); }
  //__forceinline vboolf4 lt(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_LT); }
  //__forceinline vboolf4 ge(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_GE); }
  //__forceinline vboolf4 gt(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_GT); }
  //__forceinline vboolf4 le(const vboolf4& mask, const vuint4& a, const vuint4& b) { return _mm_mask_cmp_epu32_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 eq(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a == b); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a != b); }
  //__forceinline vboolf4 lt(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a < b); }
  //__forceinline vboolf4 ge(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a >= b); }
  //__forceinline vboolf4 gt(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a > b); }
  //__forceinline vboolf4 le(const vboolf4& mask, const vuint4& a, const vuint4& b) { return mask & (a <= b); }
#endif
  template<int mask>
  __forceinline vuint4 select(const vuint4& t, const vuint4& f) {
#if defined(__SSE4_1__)
    return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
#else
    return select(vboolf4(mask), t, f);
#endif
  }
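
  // Illustrative usage sketch (not part of the original header): the template
  // parameter is a compile-time 4-bit blend mask; if bit k is set, lane k of the
  // result comes from the first argument, otherwise from the second. The call
  // below combines the low two lanes of 'a' with the high two lanes of 'b'
  // (values shown are hypothetical).
  //
  //   vuint4 a(0u, 1u, 2u, 3u);
  //   vuint4 b(10u, 11u, 12u, 13u);
  //   vuint4 m = select<0x3>(a, b);   // m = <0, 1, 12, 13>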
  /*#if defined(__SSE4_1__)
  __forceinline vuint4 min(const vuint4& a, const vuint4& b) { return _mm_min_epu32(a, b); }
  __forceinline vuint4 max(const vuint4& a, const vuint4& b) { return _mm_max_epu32(a, b); }
#else
  __forceinline vuint4 min(const vuint4& a, const vuint4& b) { return select(a < b,a,b); }
  __forceinline vuint4 max(const vuint4& a, const vuint4& b) { return select(a < b,b,a); }
#endif

  __forceinline vuint4 min(const vuint4& a, unsigned int b) { return min(a,vuint4(b)); }
  __forceinline vuint4 min(unsigned int a, const vuint4& b) { return min(vuint4(a),b); }
  __forceinline vuint4 max(const vuint4& a, unsigned int b) { return max(a,vuint4(b)); }
  __forceinline vuint4 max(unsigned int a, const vuint4& b) { return max(vuint4(a),b); }*/
  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint4 unpacklo(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
  __forceinline vuint4 unpackhi(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }

#if defined(__aarch64__)
  template<int i0, int i1, int i2, int i3>
  __forceinline vuint4 shuffle(const vuint4& v) {
    return vreinterpretq_s32_u8(vqtbl1q_u8((uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
    return vreinterpretq_s32_u8(vqtbl2q_u8((uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
  }
#else
  template<int i0, int i1, int i2, int i3>
  __forceinline vuint4 shuffle(const vuint4& v) {
    return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
    return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
  }
#endif

#if defined(__SSE3__)
  template<> __forceinline vuint4 shuffle<0, 0, 2, 2>(const vuint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vuint4 shuffle<1, 1, 3, 3>(const vuint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vuint4 shuffle<0, 1, 0, 1>(const vuint4& v) { return _mm_castpd_si128(_mm_movedup_pd (_mm_castsi128_pd(v))); }
#endif

  template<int i>
  __forceinline vuint4 shuffle(const vuint4& v) {
    return shuffle<i,i,i,i>(v);
  }

#if defined(__SSE4_1__) && !defined(__aarch64__)
  template<int src> __forceinline unsigned int extract(const vuint4& b) { return _mm_extract_epi32(b, src); }
  template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { return _mm_insert_epi32(a, b, dst); }
#else
  template<int src> __forceinline unsigned int extract(const vuint4& b) { return b[src&3]; }
  template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { vuint4 c = a; c[dst&3] = b; return c; }
#endif

  template<> __forceinline unsigned int extract<0>(const vuint4& b) { return _mm_cvtsi128_si32(b); }

  __forceinline unsigned int toScalar(const vuint4& v) { return _mm_cvtsi128_si32(v); }
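
  // Illustrative usage sketch (not part of the original header): the shuffle
  // template picks one source lane per result lane, and extract/insert read or
  // replace a single lane with a compile-time index (values are hypothetical).
  //
  //   vuint4 v(10u, 20u, 30u, 40u);
  //   vuint4 r = shuffle<3, 2, 1, 0>(v);   // r = <40, 30, 20, 10>
  //   unsigned int x = extract<2>(v);      // x = 30
  //   vuint4 w = insert<0>(v, 99u);        // w = <99, 20, 30, 40>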
  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

#if 0
#if defined(__SSE4_1__)
  __forceinline vuint4 vreduce_min(const vuint4& v) { vuint4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
  __forceinline vuint4 vreduce_max(const vuint4& v) { vuint4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
  __forceinline vuint4 vreduce_add(const vuint4& v) { vuint4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }

  __forceinline unsigned int reduce_min(const vuint4& v) { return toScalar(vreduce_min(v)); }
  __forceinline unsigned int reduce_max(const vuint4& v) { return toScalar(vreduce_max(v)); }
  __forceinline unsigned int reduce_add(const vuint4& v) { return toScalar(vreduce_add(v)); }

  __forceinline size_t select_min(const vuint4& v) { return bsf(movemask(v == vreduce_min(v))); }
  __forceinline size_t select_max(const vuint4& v) { return bsf(movemask(v == vreduce_max(v))); }

  //__forceinline size_t select_min(const vboolf4& valid, const vuint4& v) { const vuint4 a = select(valid,v,vuint4(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
  //__forceinline size_t select_max(const vboolf4& valid, const vuint4& v) { const vuint4 a = select(valid,v,vuint4(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
#else
  __forceinline unsigned int reduce_min(const vuint4& v) { return min(v[0],v[1],v[2],v[3]); }
  __forceinline unsigned int reduce_max(const vuint4& v) { return max(v[0],v[1],v[2],v[3]); }
  __forceinline unsigned int reduce_add(const vuint4& v) { return v[0]+v[1]+v[2]+v[3]; }
#endif
#endif
  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vuint4& a) {
    return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
  }
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble