
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "../sys/platform.h"
#include "../sys/intrinsics.h"
#include "constants.h"
#include <cmath>

#if defined(__ARM_NEON)
#include "../simd/arm/emulation.h"
#else
#include <emmintrin.h>
#include <xmmintrin.h>
#include <immintrin.h>
#endif

#if defined(__WIN32__)
#if defined(_MSC_VER) && (_MSC_VER <= 1700)
namespace std
{
  __forceinline bool isinf    ( const float x ) { return _finite(x) == 0; }
  __forceinline bool isnan    ( const float x ) { return _isnan(x) != 0; }
  __forceinline bool isfinite ( const float x ) { return _finite(x) != 0; }
}
#endif
#endif

namespace embree
{
  __forceinline bool isvalid ( const float& v ) {
    return (v > -FLT_LARGE) & (v < +FLT_LARGE);
  }
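
  /* cast_f2i/cast_i2f reinterpret the raw bits of a float as an int and back
     (type punning through a union) without changing the bit pattern, whereas
     toInt/toFloat below perform an ordinary value conversion. */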
  __forceinline int cast_f2i(float f) {
    union { float f; int i; } v; v.f = f; return v.i;
  }

  __forceinline float cast_i2f(int i) {
    union { float f; int i; } v; v.i = i; return v.f;
  }

  __forceinline   int toInt  (const float& a) { return int(a); }
  __forceinline float toFloat(const int&   a) { return float(a); }

#if defined(__WIN32__)
  __forceinline bool finite ( const float x ) { return _finite(x) != 0; }
#endif

  __forceinline float sign ( const float x ) { return x<0?-1.0f:1.0f; }
  __forceinline float sqr  ( const float x ) { return x*x; }
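
  /* rcp computes an approximate reciprocal of x: a hardware reciprocal
     estimate refined by Newton-Raphson iteration (two vrecpsq_f32 steps on
     aarch64, one r*(2 - a*r) step on x86). */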
  __forceinline float rcp ( const float x )
  {
#if defined(__aarch64__)
    // Move scalar to vector register and do rcp.
    __m128 a;
    a[0] = x;
    float32x4_t reciprocal = vrecpeq_f32(a);
    reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
    reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
    return reciprocal[0];
#else
    const __m128 a = _mm_set_ss(x);

#if defined(__AVX512VL__)
    const __m128 r = _mm_rcp14_ss(_mm_set_ss(0.0f),a);
#else
    const __m128 r = _mm_rcp_ss(a);
#endif

#if defined(__AVX2__)
    return _mm_cvtss_f32(_mm_mul_ss(r,_mm_fnmadd_ss(r, a, _mm_set_ss(2.0f))));
#else
    return _mm_cvtss_f32(_mm_mul_ss(r,_mm_sub_ss(_mm_set_ss(2.0f), _mm_mul_ss(r, a))));
#endif

#endif //defined(__aarch64__)
  }
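
  /* signmsk/xorf/andf operate on the raw bit pattern of a float: signmsk
     extracts the sign bit (x & 0x80000000), xorf xors two floats bitwise,
     and andf masks a float with an unsigned integer constant. */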
  __forceinline float signmsk ( const float x ) {
#if defined(__aarch64__)
    // FP and NEON share the same vector registers on arm64
    __m128 a;
    __m128i b;
    a[0] = x;
    b[0] = 0x80000000;
    a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
    return a[0];
#else
    return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(0x80000000))));
#endif
  }

  __forceinline float xorf( const float x, const float y ) {
#if defined(__aarch64__)
    // FP and NEON share the same vector registers on arm64
    __m128 a;
    __m128 b;
    a[0] = x;
    b[0] = y;
    a = _mm_xor_ps(a, b);
    return a[0];
#else
    return _mm_cvtss_f32(_mm_xor_ps(_mm_set_ss(x),_mm_set_ss(y)));
#endif
  }

  __forceinline float andf( const float x, const unsigned y ) {
#if defined(__aarch64__)
    // FP and NEON share the same vector registers on arm64
    __m128 a;
    __m128i b;
    a[0] = x;
    b[0] = y;
    a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
    return a[0];
#else
    return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(y))));
#endif
  }
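
  /* rsqrt computes an approximate reciprocal square root: a hardware estimate
     refined by Newton-Raphson iteration (two vrsqrtsq_f32 steps on aarch64,
     one r*(1.5 - 0.5*a*r*r) step on x86). */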
  __forceinline float rsqrt( const float x )
  {
#if defined(__aarch64__)
    // FP and NEON share the same vector registers on arm64
    __m128 a;
    a[0] = x;
    __m128 value = _mm_rsqrt_ps(a);
    value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
    value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
    return value[0];
#else
    const __m128 a = _mm_set_ss(x);
#if defined(__AVX512VL__)
    __m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a);
#else
    __m128 r = _mm_rsqrt_ss(a);
#endif
    const __m128 c = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r),
                                _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
    return _mm_cvtss_f32(c);
#endif
  }
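
  /* Workaround for old MSVC (_MSC_VER <= 1700): the float nextafter below is
     only a rough substitute that steps x by roughly one ulp through scaling,
     and roundf rounds by adding 0.5 and truncating. */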
#if defined(__WIN32__) && defined(_MSC_VER) && (_MSC_VER <= 1700)
  __forceinline float  nextafter(float  x, float  y) { if ((x<y) == (x>0)) return x*(1.1f+float(ulp)); else return x*(0.9f-float(ulp)); }
  __forceinline double nextafter(double x, double y) { return _nextafter(x, y); }
  __forceinline int roundf(float f) { return (int)(f + 0.5f); }
#else
  __forceinline float  nextafter(float  x, float  y) { return ::nextafterf(x, y); }
  __forceinline double nextafter(double x, double y) { return ::nextafter(x, y); }
#endif

  __forceinline float abs  ( const float x ) { return ::fabsf(x); }
  __forceinline float acos ( const float x ) { return ::acosf (x); }
  __forceinline float asin ( const float x ) { return ::asinf (x); }
  __forceinline float atan ( const float x ) { return ::atanf (x); }
  __forceinline float atan2( const float y, const float x ) { return ::atan2f(y, x); }
  __forceinline float cos  ( const float x ) { return ::cosf  (x); }
  __forceinline float cosh ( const float x ) { return ::coshf (x); }
  __forceinline float exp  ( const float x ) { return ::expf  (x); }
  __forceinline float fmod ( const float x, const float y ) { return ::fmodf (x, y); }
  __forceinline float log  ( const float x ) { return ::logf  (x); }
  __forceinline float log10( const float x ) { return ::log10f(x); }
  __forceinline float pow  ( const float x, const float y ) { return ::powf  (x, y); }
  __forceinline float sin  ( const float x ) { return ::sinf  (x); }
  __forceinline float sinh ( const float x ) { return ::sinhf (x); }
  __forceinline float sqrt ( const float x ) { return ::sqrtf (x); }
  __forceinline float tan  ( const float x ) { return ::tanf  (x); }
  __forceinline float tanh ( const float x ) { return ::tanhf (x); }
  __forceinline float floor( const float x ) { return ::floorf(x); }
  __forceinline float ceil ( const float x ) { return ::ceilf (x); }
  __forceinline float frac ( const float x ) { return x-floor(x); }

  __forceinline double abs  ( const double x ) { return ::fabs(x); }
  __forceinline double sign ( const double x ) { return x<0?-1.0:1.0; }
  __forceinline double acos ( const double x ) { return ::acos (x); }
  __forceinline double asin ( const double x ) { return ::asin (x); }
  __forceinline double atan ( const double x ) { return ::atan (x); }
  __forceinline double atan2( const double y, const double x ) { return ::atan2(y, x); }
  __forceinline double cos  ( const double x ) { return ::cos  (x); }
  __forceinline double cosh ( const double x ) { return ::cosh (x); }
  __forceinline double exp  ( const double x ) { return ::exp  (x); }
  __forceinline double fmod ( const double x, const double y ) { return ::fmod (x, y); }
  __forceinline double log  ( const double x ) { return ::log  (x); }
  __forceinline double log10( const double x ) { return ::log10(x); }
  __forceinline double pow  ( const double x, const double y ) { return ::pow  (x, y); }
  __forceinline double rcp  ( const double x ) { return 1.0/x; }
  __forceinline double rsqrt( const double x ) { return 1.0/::sqrt(x); }
  __forceinline double sin  ( const double x ) { return ::sin  (x); }
  __forceinline double sinh ( const double x ) { return ::sinh (x); }
  __forceinline double sqr  ( const double x ) { return x*x; }
  __forceinline double sqrt ( const double x ) { return ::sqrt (x); }
  __forceinline double tan  ( const double x ) { return ::tan  (x); }
  __forceinline double tanh ( const double x ) { return ::tanh (x); }
  __forceinline double floor( const double x ) { return ::floor(x); }
  __forceinline double ceil ( const double x ) { return ::ceil (x); }
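
  /* mini/maxi return the minimum/maximum of two floats using vector
     instructions: a plain float min/max on aarch64, and an integer min/max on
     the float bit patterns with SSE4.1, which matches the floating-point
     ordering for non-negative values. */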
#if defined(__aarch64__)
  __forceinline float mini(float a, float b) {
    // FP and NEON share the same vector registers on arm64
    __m128 x;
    __m128 y;
    x[0] = a;
    y[0] = b;
    x = _mm_min_ps(x, y);
    return x[0];
  }
#elif defined(__SSE4_1__)
  __forceinline float mini(float a, float b) {
    const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
    const __m128i bi = _mm_castps_si128(_mm_set_ss(b));
    const __m128i ci = _mm_min_epi32(ai,bi);
    return _mm_cvtss_f32(_mm_castsi128_ps(ci));
  }
#endif

#if defined(__aarch64__)
  __forceinline float maxi(float a, float b) {
    // FP and NEON share the same vector registers on arm64
    __m128 x;
    __m128 y;
    x[0] = a;
    y[0] = b;
    x = _mm_max_ps(x, y);
    return x[0];
  }
#elif defined(__SSE4_1__)
  __forceinline float maxi(float a, float b) {
    const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
    const __m128i bi = _mm_castps_si128(_mm_set_ss(b));
    const __m128i ci = _mm_max_epi32(ai,bi);
    return _mm_cvtss_f32(_mm_castsi128_ps(ci));
  }
#endif

  template<typename T>
  __forceinline T twice(const T& a) { return a+a; }

  __forceinline      int min(int      a, int      b) { return a<b ? a:b; }
  __forceinline unsigned min(unsigned a, unsigned b) { return a<b ? a:b; }
  __forceinline  int64_t min(int64_t  a, int64_t  b) { return a<b ? a:b; }
  __forceinline    float min(float    a, float    b) { return a<b ? a:b; }
  __forceinline   double min(double   a, double   b) { return a<b ? a:b; }
#if defined(__64BIT__) || defined(__EMSCRIPTEN__)
  __forceinline   size_t min(size_t   a, size_t   b) { return a<b ? a:b; }
#endif
#if defined(__EMSCRIPTEN__)
  __forceinline     long min(long     a, long     b) { return a<b ? a:b; }
#endif

  template<typename T> __forceinline T min(const T& a, const T& b, const T& c) { return min(min(a,b),c); }
  template<typename T> __forceinline T min(const T& a, const T& b, const T& c, const T& d) { return min(min(a,b),min(c,d)); }
  template<typename T> __forceinline T min(const T& a, const T& b, const T& c, const T& d, const T& e) { return min(min(min(a,b),min(c,d)),e); }

  template<typename T> __forceinline T mini(const T& a, const T& b, const T& c) { return mini(mini(a,b),c); }
  template<typename T> __forceinline T mini(const T& a, const T& b, const T& c, const T& d) { return mini(mini(a,b),mini(c,d)); }
  template<typename T> __forceinline T mini(const T& a, const T& b, const T& c, const T& d, const T& e) { return mini(mini(mini(a,b),mini(c,d)),e); }

  __forceinline      int max(int      a, int      b) { return a<b ? b:a; }
  __forceinline unsigned max(unsigned a, unsigned b) { return a<b ? b:a; }
  __forceinline  int64_t max(int64_t  a, int64_t  b) { return a<b ? b:a; }
  __forceinline    float max(float    a, float    b) { return a<b ? b:a; }
  __forceinline   double max(double   a, double   b) { return a<b ? b:a; }
#if defined(__64BIT__) || defined(__EMSCRIPTEN__)
  __forceinline   size_t max(size_t   a, size_t   b) { return a<b ? b:a; }
#endif
#if defined(__EMSCRIPTEN__)
  __forceinline     long max(long     a, long     b) { return a<b ? b:a; }
#endif

  template<typename T> __forceinline T max(const T& a, const T& b, const T& c) { return max(max(a,b),c); }
  template<typename T> __forceinline T max(const T& a, const T& b, const T& c, const T& d) { return max(max(a,b),max(c,d)); }
  template<typename T> __forceinline T max(const T& a, const T& b, const T& c, const T& d, const T& e) { return max(max(max(a,b),max(c,d)),e); }

  template<typename T> __forceinline T maxi(const T& a, const T& b, const T& c) { return maxi(maxi(a,b),c); }
  template<typename T> __forceinline T maxi(const T& a, const T& b, const T& c, const T& d) { return maxi(maxi(a,b),maxi(c,d)); }
  template<typename T> __forceinline T maxi(const T& a, const T& b, const T& c, const T& d, const T& e) { return maxi(maxi(maxi(a,b),maxi(c,d)),e); }

#if defined(__MACOSX__)
  __forceinline ssize_t min(ssize_t a, ssize_t b) { return a<b ? a:b; }
  __forceinline ssize_t max(ssize_t a, ssize_t b) { return a<b ? b:a; }
#endif
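
  /* sincosf computes sine and cosine of x in one call: it maps to the
     platform's __sincosf on macOS and falls back to separate sinf/cosf calls
     on Windows and FreeBSD. */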
#if defined(__MACOSX__) && !defined(__INTEL_COMPILER)
  __forceinline void sincosf(float x, float *sin, float *cos) {
    __sincosf(x,sin,cos);
  }
#endif

#if defined(__WIN32__) || defined(__FreeBSD__)
  __forceinline void sincosf(float x, float *s, float *c) {
    *s = sinf(x); *c = cosf(x);
  }
#endif
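
  /* clamp restricts x to [lower,upper] (defaulting to [0,1]) and clampz to
     [0,upper]; deg2rad/rad2deg scale by pi/180 and 180/pi, and sin2cos(x)
     evaluates sqrt(1-x^2). */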
  template<typename T> __forceinline T clamp (const T& x, const T& lower = T(zero), const T& upper = T(one)) { return max(min(x,upper),lower); }
  template<typename T> __forceinline T clampz(const T& x, const T& upper) { return max(T(zero), min(x,upper)); }

  template<typename T> __forceinline T deg2rad ( const T& x ) { return x * T(1.74532925199432957692e-2f); }
  template<typename T> __forceinline T rad2deg ( const T& x ) { return x * T(5.72957795130823208768e1f); }
  template<typename T> __forceinline T sin2cos ( const T& x ) { return sqrt(max(T(zero),T(one)-x*x)); }
  template<typename T> __forceinline T cos2sin ( const T& x ) { return sin2cos(x); }
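
  /* madd/msub/nmadd/nmsub evaluate a*b+c, a*b-c, -a*b+c and -a*b-c. With AVX2
     they map to scalar FMA instructions; on aarch64 with clang they rely on
     floating-point contraction to fuse a*b+c; otherwise they fall back to a
     plain multiply and add. */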
#if defined(__AVX2__)
  __forceinline float madd  ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fmadd_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
  __forceinline float msub  ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
  __forceinline float nmadd ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmadd_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
  __forceinline float nmsub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
#elif defined(__aarch64__) && defined(__clang__)
#pragma clang fp contract(fast)
  __forceinline float madd  ( const float a, const float b, const float c) { return a*b + c; }
  __forceinline float msub  ( const float a, const float b, const float c) { return a*b - c; }
  __forceinline float nmadd ( const float a, const float b, const float c) { return c - a*b; }
  __forceinline float nmsub ( const float a, const float b, const float c) { return -(c + a*b); }
#pragma clang fp contract(on)
#else
  __forceinline float madd  ( const float a, const float b, const float c) { return a*b+c; }
  __forceinline float msub  ( const float a, const float b, const float c) { return a*b-c; }
  __forceinline float nmadd ( const float a, const float b, const float c) { return -a*b+c; }
  __forceinline float nmsub ( const float a, const float b, const float c) { return -a*b-c; }
#endif

  /*! random functions */
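  /* On Windows rand() yields only 15 random bits (RAND_MAX is 0x7fff), so
     several calls are combined at different shifts to fill an int/uint32_t;
     the float and double variants return a value in [0,1]. */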
  template<typename T> T random() { return T(0); }
#if defined(_WIN32)
  template<> __forceinline int      random() { return int(rand()) ^ (int(rand()) << 8) ^ (int(rand()) << 16); }
  template<> __forceinline uint32_t random() { return uint32_t(rand()) ^ (uint32_t(rand()) << 8) ^ (uint32_t(rand()) << 16); }
#else
  template<> __forceinline int      random() { return int(rand()); }
  template<> __forceinline uint32_t random() { return uint32_t(rand()) ^ (uint32_t(rand()) << 16); }
#endif
  template<> __forceinline float    random() { return rand()/float(RAND_MAX); }
  template<> __forceinline double   random() { return rand()/double(RAND_MAX); }

#if _WIN32
  __forceinline double drand48() {
    return double(rand())/double(RAND_MAX);
  }

  __forceinline void srand48(long seed) {
    return srand(seed);
  }
#endif

  /*! selects */
  __forceinline bool  select(bool s, bool  t, bool  f) { return s ? t : f; }
  __forceinline int   select(bool s, int   t, int   f) { return s ? t : f; }
  __forceinline float select(bool s, float t, float f) { return s ? t : f; }

  __forceinline bool all(bool s) { return s; }

  __forceinline float lerp(const float v0, const float v1, const float t) {
    return madd(1.0f-t,v0,t*v1);
  }
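
  /* lerp2 bilinearly interpolates the four corner values x0..x3: x0/x2 are
     blended along v at u=0, x1/x3 at u=1, and the two results are blended
     along u. */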
  template<typename T>
  __forceinline T lerp2(const float x0, const float x1, const float x2, const float x3, const T& u, const T& v) {
    return madd((1.0f-u),madd((1.0f-v),T(x0),v*T(x2)),u*madd((1.0f-v),T(x1),v*T(x3)));
  }

  /*! exchange */
  template<typename T> __forceinline void xchg ( T& a, T& b ) { const T tmp = a; a = b; b = tmp; }

  /* load/store */
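  /* mem<T> provides masked scalar loads and stores: memory is only touched
     when the mask is set, otherwise load returns 0 and store is a no-op. */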
  template<typename Ty> struct mem;

  template<> struct mem<float> {
    static __forceinline float load (bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }
    static __forceinline float loadu(bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }

    static __forceinline void store (bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
    static __forceinline void storeu(bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
  };

  /*! bit reverse operation */
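  /* bitReverse flips the bit order of a 32-bit value by swapping neighbouring
     bits, then pairs, nibbles, bytes and finally the two half-words;
     e.g. 0x00000001 maps to 0x80000000. */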
  template<class T>
  __forceinline T bitReverse(const T& vin)
  {
    T v = vin;
    v = ((v >>  1) & 0x55555555) | ((v & 0x55555555) <<  1);
    v = ((v >>  2) & 0x33333333) | ((v & 0x33333333) <<  2);
    v = ((v >>  4) & 0x0F0F0F0F) | ((v & 0x0F0F0F0F) <<  4);
    v = ((v >>  8) & 0x00FF00FF) | ((v & 0x00FF00FF) <<  8);
    v = ( v >> 16 ) | ( v << 16);
    return v;
  }

  /*! bit interleave operation */
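  /* bitInterleave builds a 30-bit Morton code from the low 10 bits of x, y
     and z: each component is spread so its bits land in every third position,
     and the results are combined as x | y<<1 | z<<2. */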
  template<class T>
  __forceinline T bitInterleave(const T& xin, const T& yin, const T& zin)
  {
    T x = xin, y = yin, z = zin;

    x = (x | (x << 16)) & 0x030000FF;
    x = (x | (x <<  8)) & 0x0300F00F;
    x = (x | (x <<  4)) & 0x030C30C3;
    x = (x | (x <<  2)) & 0x09249249;

    y = (y | (y << 16)) & 0x030000FF;
    y = (y | (y <<  8)) & 0x0300F00F;
    y = (y | (y <<  4)) & 0x030C30C3;
    y = (y | (y <<  2)) & 0x09249249;

    z = (z | (z << 16)) & 0x030000FF;
    z = (z | (z <<  8)) & 0x0300F00F;
    z = (z | (z <<  4)) & 0x030C30C3;
    z = (z | (z <<  2)) & 0x09249249;

    return x | (y << 1) | (z << 2);
  }
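
  /* With AVX2 the specialization below spreads the bits directly using the
     BMI2 pdep (parallel bit deposit) instruction instead of the shift/mask
     sequence above. */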
#if defined(__AVX2__) && !defined(__aarch64__)
  template<>
  __forceinline unsigned int bitInterleave(const unsigned int &xi, const unsigned int& yi, const unsigned int& zi)
  {
    const unsigned int xx = pdep(xi,0x49249249 /* 0b01001001001001001001001001001001 */ );
    const unsigned int yy = pdep(yi,0x92492492 /* 0b10010010010010010010010010010010 */ );
    const unsigned int zz = pdep(zi,0x24924924 /* 0b00100100100100100100100100100100 */ );
    return xx | yy | zz;
  }
#endif

  /*! bit interleave operation for 64 bit data types */
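  /* bitInterleave64 does the same for 64-bit values: the low 21 bits of each
     component are spread to every third bit position, yielding a 63-bit
     Morton code. */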
  template<class T>
  __forceinline T bitInterleave64(const T& xin, const T& yin, const T& zin)
  {
    T x = xin & 0x1fffff;
    T y = yin & 0x1fffff;
    T z = zin & 0x1fffff;

    x = (x | x << 32) & 0x1f00000000ffff;
    x = (x | x << 16) & 0x1f0000ff0000ff;
    x = (x | x <<  8) & 0x100f00f00f00f00f;
    x = (x | x <<  4) & 0x10c30c30c30c30c3;
    x = (x | x <<  2) & 0x1249249249249249;

    y = (y | y << 32) & 0x1f00000000ffff;
    y = (y | y << 16) & 0x1f0000ff0000ff;
    y = (y | y <<  8) & 0x100f00f00f00f00f;
    y = (y | y <<  4) & 0x10c30c30c30c30c3;
    y = (y | y <<  2) & 0x1249249249249249;

    z = (z | z << 32) & 0x1f00000000ffff;
    z = (z | z << 16) & 0x1f0000ff0000ff;
    z = (z | z <<  8) & 0x100f00f00f00f00f;
    z = (z | z <<  4) & 0x10c30c30c30c30c3;
    z = (z | z <<  2) & 0x1249249249249249;

    return x | (y << 1) | (z << 2);
  }
}