// vec2fa.h
  1. // Copyright 2009-2021 Intel Corporation
  2. // SPDX-License-Identifier: Apache-2.0
  3. #pragma once
  4. #include "../sys/alloc.h"
  5. #include "math.h"
  6. #include "../simd/sse.h"
  7. namespace embree
  8. {
  9. ////////////////////////////////////////////////////////////////////////////////
  10. /// SSE Vec2fa Type
  11. ////////////////////////////////////////////////////////////////////////////////
/// 2-component single-precision float vector backed by a full 16-byte SSE
/// register. The upper two lanes (az, aw) are alignment padding: most
/// operations process all four lanes, but only x and y are meaningful.
struct __aligned(16) Vec2fa
{
  ALIGNED_STRUCT_(16);
  typedef float Scalar;
  enum { N = 2 };  // number of meaningful components
  union {
    __m128 m128;                    // raw SSE register view
    struct { float x,y,az,aw; };    // scalar view; az/aw are padding lanes
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline Vec2fa( ) {}  // intentionally leaves all lanes uninitialized
  __forceinline Vec2fa( const __m128 a ) : m128(a) {}

  // Conversion from the generic Vec2<float>: only x/y are copied, so the
  // padding lanes remain uninitialized.
  __forceinline Vec2fa ( const Vec2<float>& other ) { x = other.x; y = other.y; }
  __forceinline Vec2fa& operator =( const Vec2<float>& other ) { x = other.x; y = other.y; return *this; }

  __forceinline Vec2fa ( const Vec2fa& other ) { m128 = other.m128; }
  __forceinline Vec2fa& operator =( const Vec2fa& other ) { m128 = other.m128; return *this; }

  // Broadcast a single scalar into all four lanes.
  __forceinline explicit Vec2fa( const float a ) : m128(_mm_set1_ps(a)) {}
  // Lanes end up as (x, y, y, y): the padding lanes are filled with y so that
  // all four lanes hold defined values.
  __forceinline Vec2fa( const float x, const float y) : m128(_mm_set_ps(y, y, y, x)) {}
  // Convert four packed int32 lanes to float.
  __forceinline explicit Vec2fa( const __m128i a ) : m128(_mm_cvtepi32_ps(a)) {}

  __forceinline operator const __m128&() const { return m128; }
  __forceinline operator       __m128&()       { return m128; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Loads and Stores
  ////////////////////////////////////////////////////////////////////////////////

  // NOTE(review): both loads read a full 16 bytes starting at 'a' (load
  // additionally requires 16-byte alignment); the caller must guarantee that
  // the memory past the two floats is readable. The upper two lanes are then
  // masked to zero via the (-1,-1,0,0) bit mask.
  static __forceinline Vec2fa load( const void* const a ) {
    return Vec2fa(_mm_and_ps(_mm_load_ps((float*)a),_mm_castsi128_ps(_mm_set_epi32(0, 0, -1, -1))));
  }

  static __forceinline Vec2fa loadu( const void* const a ) {
    return Vec2fa(_mm_and_ps(_mm_loadu_ps((float*)a),_mm_castsi128_ps(_mm_set_epi32(0, 0, -1, -1))));
  }

  // Stores all four lanes (16 bytes), including the padding lanes.
  static __forceinline void storeu ( void* ptr, const Vec2fa& v ) {
    _mm_storeu_ps((float*)ptr,v);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Constants
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline Vec2fa( ZeroTy   ) : m128(_mm_setzero_ps()) {}
  __forceinline Vec2fa( OneTy    ) : m128(_mm_set1_ps(1.0f)) {}
  __forceinline Vec2fa( PosInfTy ) : m128(_mm_set1_ps(pos_inf)) {}
  __forceinline Vec2fa( NegInfTy ) : m128(_mm_set1_ps(neg_inf)) {}

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  // Index 0 -> x, 1 -> y; bounds are checked only by assert (debug builds).
  __forceinline const float& operator []( const size_t index ) const { assert(index < 2); return (&x)[index]; }
  __forceinline       float& operator []( const size_t index )       { assert(index < 2); return (&x)[index]; }
};
  60. ////////////////////////////////////////////////////////////////////////////////
  61. /// Unary Operators
  62. ////////////////////////////////////////////////////////////////////////////////
  63. __forceinline Vec2fa operator +( const Vec2fa& a ) { return a; }
  64. __forceinline Vec2fa operator -( const Vec2fa& a ) {
  65. const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
  66. return _mm_xor_ps(a.m128, mask);
  67. }
  68. __forceinline Vec2fa abs ( const Vec2fa& a ) {
  69. const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
  70. return _mm_and_ps(a.m128, mask);
  71. }
  72. __forceinline Vec2fa sign ( const Vec2fa& a ) {
  73. return blendv_ps(Vec2fa(one), -Vec2fa(one), _mm_cmplt_ps (a,Vec2fa(zero)));
  74. }
/// Lane-wise approximate reciprocal, refined with Newton-Raphson iteration(s)
/// to near full float precision (all four SSE lanes are processed).
__forceinline Vec2fa rcp  ( const Vec2fa& a )
{
#if defined(__aarch64__)
  // NEON: start from the hardware estimate and apply two vrecpsq_f32
  // Newton-Raphson steps (each step roughly doubles the accurate bits).
  __m128 reciprocal = _mm_rcp_ps(a.m128);
  reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
  reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
  return (const Vec2fa)reciprocal;
#else
#if defined(__AVX512VL__)
  const Vec2fa r = _mm_rcp14_ps(a.m128);  // 14-bit accurate estimate
#else
  const Vec2fa r = _mm_rcp_ps(a.m128);    // ~12-bit accurate estimate
#endif

#if defined(__AVX2__)
  // Single-rounding FMA form of one Newton-Raphson step.
  const Vec2fa h_n = _mm_fnmadd_ps(a, r, vfloat4(1.0));  // First, compute 1 - a * r (which will be very close to 0)
  const Vec2fa res = _mm_fmadd_ps(r, h_n, r);            // Then compute r + r * h_n
#else
  const Vec2fa h_n = _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r));  // First, compute 1 - a * r (which will be very close to 0)
  const Vec2fa res = _mm_add_ps(r,_mm_mul_ps(r, h_n));             // Then compute r + r * h_n
#endif

  return res;
#endif  //defined(__aarch64__)
}
  98. __forceinline Vec2fa sqrt ( const Vec2fa& a ) { return _mm_sqrt_ps(a.m128); }
  99. __forceinline Vec2fa sqr ( const Vec2fa& a ) { return _mm_mul_ps(a,a); }
/// Lane-wise approximate reciprocal square root, refined toward full float
/// precision (all four SSE lanes are processed).
__forceinline Vec2fa rsqrt( const Vec2fa& a )
{
#if defined(__aarch64__)
  // NEON: hardware estimate plus two vrsqrtsq_f32 refinement steps.
  __m128 r = _mm_rsqrt_ps(a.m128);
  r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
  r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
  return r;
#else
#if defined(__AVX512VL__)
  __m128 r = _mm_rsqrt14_ps(a.m128);  // 14-bit accurate estimate
#else
  __m128 r = _mm_rsqrt_ps(a.m128);    // ~12-bit accurate estimate
#endif
  // One Newton-Raphson step: r' = 1.5*r + (-0.5*a*r)*(r*r).
  return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#endif
}
  116. __forceinline Vec2fa zero_fix(const Vec2fa& a) {
  117. return blendv_ps(a, _mm_set1_ps(min_rcp_input), _mm_cmplt_ps (abs(a).m128, _mm_set1_ps(min_rcp_input)));
  118. }
  119. __forceinline Vec2fa rcp_safe(const Vec2fa& a) {
  120. return rcp(zero_fix(a));
  121. }
  122. __forceinline Vec2fa log ( const Vec2fa& a ) {
  123. return Vec2fa(logf(a.x),logf(a.y));
  124. }
  125. __forceinline Vec2fa exp ( const Vec2fa& a ) {
  126. return Vec2fa(expf(a.x),expf(a.y));
  127. }
  128. ////////////////////////////////////////////////////////////////////////////////
  129. /// Binary Operators
  130. ////////////////////////////////////////////////////////////////////////////////
  131. __forceinline Vec2fa operator +( const Vec2fa& a, const Vec2fa& b ) { return _mm_add_ps(a.m128, b.m128); }
  132. __forceinline Vec2fa operator -( const Vec2fa& a, const Vec2fa& b ) { return _mm_sub_ps(a.m128, b.m128); }
  133. __forceinline Vec2fa operator *( const Vec2fa& a, const Vec2fa& b ) { return _mm_mul_ps(a.m128, b.m128); }
  134. __forceinline Vec2fa operator *( const Vec2fa& a, const float b ) { return a * Vec2fa(b); }
  135. __forceinline Vec2fa operator *( const float a, const Vec2fa& b ) { return Vec2fa(a) * b; }
  136. __forceinline Vec2fa operator /( const Vec2fa& a, const Vec2fa& b ) { return _mm_div_ps(a.m128,b.m128); }
  137. __forceinline Vec2fa operator /( const Vec2fa& a, const float b ) { return _mm_div_ps(a.m128,_mm_set1_ps(b)); }
  138. __forceinline Vec2fa operator /( const float a, const Vec2fa& b ) { return _mm_div_ps(_mm_set1_ps(a),b.m128); }
  139. __forceinline Vec2fa min( const Vec2fa& a, const Vec2fa& b ) { return _mm_min_ps(a.m128,b.m128); }
  140. __forceinline Vec2fa max( const Vec2fa& a, const Vec2fa& b ) { return _mm_max_ps(a.m128,b.m128); }
#if defined(__aarch64__) || defined(__SSE4_1__)
// Integer min on the raw float bit patterns. NOTE(review): reinterpreting
// IEEE-754 floats as signed int32 preserves their ordering only for
// non-negative values; presumably callers pass non-negative data --
// confirm before using with mixed-sign input.
__forceinline Vec2fa mini(const Vec2fa& a, const Vec2fa& b) {
  const vint4 ai = _mm_castps_si128(a);
  const vint4 bi = _mm_castps_si128(b);
  const vint4 ci = _mm_min_epi32(ai,bi);
  return _mm_castsi128_ps(ci);
}
#endif

#if defined(__aarch64__) || defined(__SSE4_1__)
// Integer max on the raw float bit patterns; same sign caveat as mini().
__forceinline Vec2fa maxi(const Vec2fa& a, const Vec2fa& b) {
  const vint4 ai = _mm_castps_si128(a);
  const vint4 bi = _mm_castps_si128(b);
  const vint4 ci = _mm_max_epi32(ai,bi);
  return _mm_castsi128_ps(ci);
}
#endif
  157. __forceinline Vec2fa pow ( const Vec2fa& a, const float& b ) {
  158. return Vec2fa(powf(a.x,b),powf(a.y,b));
  159. }
  160. ////////////////////////////////////////////////////////////////////////////////
  161. /// Ternary Operators
  162. ////////////////////////////////////////////////////////////////////////////////
#if defined(__AVX2__)
// Fused multiply-add variants: one rounding per lane.
__forceinline Vec2fa madd  ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return _mm_fmadd_ps(a,b,c); }   //  a*b+c
__forceinline Vec2fa msub  ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return _mm_fmsub_ps(a,b,c); }   //  a*b-c
__forceinline Vec2fa nmadd ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return _mm_fnmadd_ps(a,b,c); }  // -a*b+c
__forceinline Vec2fa nmsub ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return _mm_fnmsub_ps(a,b,c); }  // -a*b-c
#else
// Fallback: separate multiply then add (two roundings), so results may differ
// from the FMA path in the last ulp.
__forceinline Vec2fa madd  ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return a*b+c; }
__forceinline Vec2fa msub  ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return a*b-c; }
__forceinline Vec2fa nmadd ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return -a*b+c;}
__forceinline Vec2fa nmsub ( const Vec2fa& a, const Vec2fa& b, const Vec2fa& c) { return -a*b-c; }
#endif

// Scalar-first overloads: broadcast the scalar and forward to the vector form.
__forceinline Vec2fa madd  ( const float a, const Vec2fa& b, const Vec2fa& c) { return madd(Vec2fa(a),b,c); }
__forceinline Vec2fa msub  ( const float a, const Vec2fa& b, const Vec2fa& c) { return msub(Vec2fa(a),b,c); }
__forceinline Vec2fa nmadd ( const float a, const Vec2fa& b, const Vec2fa& c) { return nmadd(Vec2fa(a),b,c); }
__forceinline Vec2fa nmsub ( const float a, const Vec2fa& b, const Vec2fa& c) { return nmsub(Vec2fa(a),b,c); }
  178. ////////////////////////////////////////////////////////////////////////////////
  179. /// Assignment Operators
  180. ////////////////////////////////////////////////////////////////////////////////
  181. __forceinline Vec2fa& operator +=( Vec2fa& a, const Vec2fa& b ) { return a = a + b; }
  182. __forceinline Vec2fa& operator -=( Vec2fa& a, const Vec2fa& b ) { return a = a - b; }
  183. __forceinline Vec2fa& operator *=( Vec2fa& a, const Vec2fa& b ) { return a = a * b; }
  184. __forceinline Vec2fa& operator *=( Vec2fa& a, const float b ) { return a = a * b; }
  185. __forceinline Vec2fa& operator /=( Vec2fa& a, const Vec2fa& b ) { return a = a / b; }
  186. __forceinline Vec2fa& operator /=( Vec2fa& a, const float b ) { return a = a / b; }
  187. ////////////////////////////////////////////////////////////////////////////////
  188. /// Reductions
  189. ////////////////////////////////////////////////////////////////////////////////
  190. __forceinline float reduce_add(const Vec2fa& v) { return v.x+v.y; }
  191. __forceinline float reduce_mul(const Vec2fa& v) { return v.x*v.y; }
  192. __forceinline float reduce_min(const Vec2fa& v) { return min(v.x,v.y); }
  193. __forceinline float reduce_max(const Vec2fa& v) { return max(v.x,v.y); }
  194. ////////////////////////////////////////////////////////////////////////////////
  195. /// Comparison Operators
  196. ////////////////////////////////////////////////////////////////////////////////
  197. __forceinline bool operator ==( const Vec2fa& a, const Vec2fa& b ) { return (_mm_movemask_ps(_mm_cmpeq_ps (a.m128, b.m128)) & 3) == 3; }
  198. __forceinline bool operator !=( const Vec2fa& a, const Vec2fa& b ) { return (_mm_movemask_ps(_mm_cmpneq_ps(a.m128, b.m128)) & 3) != 0; }
  199. ////////////////////////////////////////////////////////////////////////////////
  200. /// Euclidean Space Operators
  201. ////////////////////////////////////////////////////////////////////////////////
#if defined(__SSE4_1__)
/// 2D dot product via SSE4.1 dpps. Mask 0x3F: the high nibble (0x3) selects
/// only lanes 0 and 1 (x,y) for the multiply-and-sum, the low nibble (0xF)
/// broadcasts the sum to all result lanes; lane 0 is then extracted.
__forceinline float dot ( const Vec2fa& a, const Vec2fa& b ) {
  return _mm_cvtss_f32(_mm_dp_ps(a,b,0x3F));
}
#else
/// 2D dot product fallback: lane-wise multiply, then scalar sum of x and y.
__forceinline float dot ( const Vec2fa& a, const Vec2fa& b ) {
  return reduce_add(a*b);
}
#endif
  211. __forceinline Vec2fa cross ( const Vec2fa& a ) {
  212. return Vec2fa(-a.y,a.x);
  213. }
  214. __forceinline float sqr_length ( const Vec2fa& a ) { return dot(a,a); }
  215. __forceinline float rcp_length ( const Vec2fa& a ) { return rsqrt(dot(a,a)); }
  216. __forceinline float rcp_length2( const Vec2fa& a ) { return rcp(dot(a,a)); }
  217. __forceinline float length ( const Vec2fa& a ) { return sqrt(dot(a,a)); }
  218. __forceinline Vec2fa normalize( const Vec2fa& a ) { return a*rsqrt(dot(a,a)); }
  219. __forceinline float distance ( const Vec2fa& a, const Vec2fa& b ) { return length(a-b); }
  220. ////////////////////////////////////////////////////////////////////////////////
  221. /// Select
  222. ////////////////////////////////////////////////////////////////////////////////
  223. __forceinline Vec2fa select( bool s, const Vec2fa& t, const Vec2fa& f ) {
  224. __m128 mask = s ? _mm_castsi128_ps(_mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())) : _mm_setzero_ps();
  225. return blendv_ps(f, t, mask);
  226. }
  227. __forceinline Vec2fa lerp(const Vec2fa& v0, const Vec2fa& v1, const float t) {
  228. return madd(1.0f-t,v0,t*v1);
  229. }
  230. __forceinline int maxDim ( const Vec2fa& a )
  231. {
  232. const Vec2fa b = abs(a);
  233. if (b.x > b.y) return 0;
  234. else return 1;
  235. }
  236. ////////////////////////////////////////////////////////////////////////////////
  237. /// Rounding Functions
  238. ////////////////////////////////////////////////////////////////////////////////
#if defined(__aarch64__)
//__forceinline Vec2fa trunc(const Vec2fa& a) { return vrndq_f32(a); }
__forceinline Vec2fa floor(const Vec2fa& a) { return vrndmq_f32(a); }  // round toward -inf
__forceinline Vec2fa ceil (const Vec2fa& a) { return vrndpq_f32(a); }  // round toward +inf
#elif defined (__SSE4_1__)
// NOTE(review): the disabled trunc uses _MM_FROUND_TO_NEAREST_INT, which
// rounds to nearest rather than truncating toward zero -- fix the rounding
// mode if it is ever re-enabled.
//__forceinline Vec2fa trunc( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec2fa floor( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF    ); }
__forceinline Vec2fa ceil ( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF    ); }
#else
// Scalar fallback: only the two meaningful lanes are computed. (The disabled
// trunc referenced a.z -- a stale Vec3fa copy-paste; corrected here.)
//__forceinline Vec2fa trunc( const Vec2fa& a ) { return Vec2fa(truncf(a.x),truncf(a.y)); }
__forceinline Vec2fa floor( const Vec2fa& a ) { return Vec2fa(floorf(a.x),floorf(a.y)); }
__forceinline Vec2fa ceil ( const Vec2fa& a ) { return Vec2fa(ceilf (a.x),ceilf (a.y)); }
#endif
  252. ////////////////////////////////////////////////////////////////////////////////
  253. /// Output Operators
  254. ////////////////////////////////////////////////////////////////////////////////
  255. __forceinline embree_ostream operator<<(embree_ostream cout, const Vec2fa& a) {
  256. return cout << "(" << a.x << ", " << a.y << ")";
  257. }
  258. typedef Vec2fa Vec2fa_t;
  259. }