btVector3.h 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337
  1. /*
  2. Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans https://bulletphysics.org
  3. This software is provided 'as-is', without any express or implied warranty.
  4. In no event will the authors be held liable for any damages arising from the use of this software.
  5. Permission is granted to anyone to use this software for any purpose,
  6. including commercial applications, and to alter it and redistribute it freely,
  7. subject to the following restrictions:
  8. 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
  9. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
  10. 3. This notice may not be removed or altered from any source distribution.
  11. */
  12. #ifndef BT_VECTOR3_H
  13. #define BT_VECTOR3_H
  14. //#include <stdint.h>
  15. #include "btScalar.h"
  16. #include "btMinMax.h"
  17. #include "btAlignedAllocator.h"
  18. #ifdef BT_USE_DOUBLE_PRECISION
  19. #define btVector3Data btVector3DoubleData
  20. #define btVector3DataName "btVector3DoubleData"
  21. #else
  22. #define btVector3Data btVector3FloatData
  23. #define btVector3DataName "btVector3FloatData"
  24. #endif //BT_USE_DOUBLE_PRECISION
  25. #if defined BT_USE_SSE
  26. //typedef uint32_t __m128i __attribute__ ((vector_size(16)));
  27. #ifdef _MSC_VER
  28. #pragma warning(disable : 4556) // value of intrinsic immediate argument '4294967239' is out of range '0 - 255'
  29. #endif
  30. #define BT_SHUFFLE(x, y, z, w) (((w) << 6 | (z) << 4 | (y) << 2 | (x)) & 0xff)
  31. //#define bt_pshufd_ps( _a, _mask ) (__m128) _mm_shuffle_epi32((__m128i)(_a), (_mask) )
  32. #define bt_pshufd_ps(_a, _mask) _mm_shuffle_ps((_a), (_a), (_mask))
  33. #define bt_splat3_ps(_a, _i) bt_pshufd_ps((_a), BT_SHUFFLE(_i, _i, _i, 3))
  34. #define bt_splat_ps(_a, _i) bt_pshufd_ps((_a), BT_SHUFFLE(_i, _i, _i, _i))
  35. #define btv3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
  36. #define btvAbsMask (_mm_set_epi32(0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
  37. #define btvFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
  38. #define btv3AbsfMask btCastiTo128f(btv3AbsiMask)
  39. #define btvFFF0fMask btCastiTo128f(btvFFF0Mask)
  40. #define btvxyzMaskf btvFFF0fMask
  41. #define btvAbsfMask btCastiTo128f(btvAbsMask)
  42. //there is an issue with XCode 3.2 (LCx errors)
  43. #define btvMzeroMask (_mm_set_ps(-0.0f, -0.0f, -0.0f, -0.0f))
  44. #define v1110 (_mm_set_ps(0.0f, 1.0f, 1.0f, 1.0f))
  45. #define vHalf (_mm_set_ps(0.5f, 0.5f, 0.5f, 0.5f))
  46. #define v1_5 (_mm_set_ps(1.5f, 1.5f, 1.5f, 1.5f))
  47. //const __m128 ATTRIBUTE_ALIGNED16(btvMzeroMask) = {-0.0f, -0.0f, -0.0f, -0.0f};
  48. //const __m128 ATTRIBUTE_ALIGNED16(v1110) = {1.0f, 1.0f, 1.0f, 0.0f};
  49. //const __m128 ATTRIBUTE_ALIGNED16(vHalf) = {0.5f, 0.5f, 0.5f, 0.5f};
  50. //const __m128 ATTRIBUTE_ALIGNED16(v1_5) = {1.5f, 1.5f, 1.5f, 1.5f};
  51. #endif
  52. #ifdef BT_USE_NEON
  53. const float32x4_t ATTRIBUTE_ALIGNED16(btvMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
  54. const int32x4_t ATTRIBUTE_ALIGNED16(btvFFF0Mask) = (int32x4_t){static_cast<int32_t>(0xFFFFFFFF),
  55. static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0xFFFFFFFF), 0x0};
  56. const int32x4_t ATTRIBUTE_ALIGNED16(btvAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
  57. const int32x4_t ATTRIBUTE_ALIGNED16(btv3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};
  58. #endif
/**@brief btVector3 can be used to represent 3D points and vectors.
 * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user
 * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers
 * NOTE(review): w is set to 0 by the scalar constructor/setValue, but some SIMD
 * paths (e.g. operator+= in SSE) also operate on the w lane; operator== compares
 * all four components.
 */
ATTRIBUTE_ALIGNED16(class)
btVector3
{
public:
	BT_DECLARE_ALIGNED_ALLOCATOR();

#if defined(__SPU__) && defined(__CELLOS_LV2__)
	// Cell SPU build: plain scalar storage, reinterpreted as a vec_float4 on demand.
	btScalar m_floats[4];

public:
	SIMD_FORCE_INLINE const vec_float4& get128() const
	{
		return *((const vec_float4*)&m_floats[0]);
	}

public:
#else  //__CELLOS_LV2__ __SPU__
#if defined(BT_USE_SSE) || defined(BT_USE_NEON)  // _WIN32 || ARM
	// SIMD builds: anonymous union lets the same 16 bytes be accessed either as a
	// native 128-bit vector (mVec128) or as four scalars (m_floats).
	union {
		btSimdFloat4 mVec128;
		btScalar m_floats[4];
	};
	SIMD_FORCE_INLINE btSimdFloat4 get128() const
	{
		return mVec128;
	}
	SIMD_FORCE_INLINE void set128(btSimdFloat4 v128)
	{
		mVec128 = v128;
	}
#else
	btScalar m_floats[4];
#endif
#endif  //__CELLOS_LV2__ __SPU__

public:
	/**@brief No initialization constructor (members are left uninitialized) */
	SIMD_FORCE_INLINE btVector3()
	{
	}

	/**@brief Constructor from scalars
	 * @param _x X value
	 * @param _y Y value
	 * @param _z Z value
	 * The w component is initialized to 0. */
	SIMD_FORCE_INLINE btVector3(const btScalar& _x, const btScalar& _y, const btScalar& _z)
	{
		m_floats[0] = _x;
		m_floats[1] = _y;
		m_floats[2] = _z;
		m_floats[3] = btScalar(0.f);
	}

#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)) || defined(BT_USE_NEON)
	// Set Vector from a raw 128-bit SIMD value (all four lanes copied as-is).
	SIMD_FORCE_INLINE btVector3(btSimdFloat4 v)
	{
		mVec128 = v;
	}

	// Copy constructor (whole-register copy, including w).
	SIMD_FORCE_INLINE btVector3(const btVector3& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	// Assignment Operator (whole-register copy, including w).
	SIMD_FORCE_INLINE btVector3&
	operator=(const btVector3& v)
	{
		mVec128 = v.mVec128;
		return *this;
	}
#endif  // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

	/**@brief Add a vector to this one
	 * @param v The vector to add to this one */
	SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = _mm_add_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vaddq_f32(mVec128, v.mVec128);
#else
		m_floats[0] += v.m_floats[0];
		m_floats[1] += v.m_floats[1];
		m_floats[2] += v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Subtract a vector from this one
	 * @param v The vector to subtract */
	SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = _mm_sub_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vsubq_f32(mVec128, v.mVec128);
#else
		m_floats[0] -= v.m_floats[0];
		m_floats[1] -= v.m_floats[1];
		m_floats[2] -= v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Scale the vector
	 * @param s Scale factor */
	SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		__m128 vs = _mm_load_ss(&s);  // (S 0 0 0)
		// Shuffle 0x80 broadcasts lane 0 into x,y,z and leaves 0 in w,
		// so w stays zeroed by the multiply.
		vs = bt_pshufd_ps(vs, 0x80);  // (S S S 0.0)
		mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
		mVec128 = vmulq_n_f32(mVec128, s);
#else
		m_floats[0] *= s;
		m_floats[1] *= s;
		m_floats[2] *= s;
#endif
		return *this;
	}

	/**@brief Inversely scale the vector
	 * @param s Scale factor to divide by (must be non-zero) */
	SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
	{
		btFullAssert(s != btScalar(0.0));

#if 0  //defined(BT_USE_SSE_IN_API)
// this code is not faster !
		__m128 vs = _mm_load_ss(&s);
		vs = _mm_div_ss(v1110, vs);
		vs = bt_pshufd_ps(vs, 0x00);  // (S S S S)

		mVec128 = _mm_mul_ps(mVec128, vs);

		return *this;
#else
		// Multiply by reciprocal: one division instead of three.
		return *this *= btScalar(1.0) / s;
#endif
	}

	/**@brief Return the dot product
	 * @param v The other vector in the dot product
	 * Only x, y, z participate; w is ignored. */
	SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// Horizontal add of the low three lanes of the products.
		__m128 vd = _mm_mul_ps(mVec128, v.mVec128);
		__m128 z = _mm_movehl_ps(vd, vd);
		__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, y);
		vd = _mm_add_ss(vd, z);
		return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
		float32x4_t vd = vmulq_f32(mVec128, v.mVec128);
		float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_low_f32(vd));
		x = vadd_f32(x, vget_high_f32(vd));
		return vget_lane_f32(x, 0);
#else
		return m_floats[0] * v.m_floats[0] +
			   m_floats[1] * v.m_floats[1] +
			   m_floats[2] * v.m_floats[2];
#endif
	}

	/**@brief Return the length of the vector squared */
	SIMD_FORCE_INLINE btScalar length2() const
	{
		return dot(*this);
	}

	/**@brief Return the length of the vector */
	SIMD_FORCE_INLINE btScalar length() const
	{
		return btSqrt(length2());
	}

	/**@brief Return the norm (length) of the vector */
	SIMD_FORCE_INLINE btScalar norm() const
	{
		return length();
	}

	/**@brief Return the norm (length) of the vector, or 0 for near-zero vectors */
	SIMD_FORCE_INLINE btScalar safeNorm() const
	{
		btScalar d = length2();
		//workaround for some clang/gcc issue of sqrtf(tiny number) = -INF
		if (d > SIMD_EPSILON)
			return btSqrt(d);
		return btScalar(0);
	}

	/**@brief Return the distance squared between the ends of this and another vector
	 * This is symantically treating the vector like a point */
	SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const;

	/**@brief Return the distance between the ends of this and another vector
	 * This is symantically treating the vector like a point */
	SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;

	/**@brief Normalize in place; falls back to the unit X axis (1,0,0) when the
	 * length is below SIMD_EPSILON, so the result is always a valid unit vector. */
	SIMD_FORCE_INLINE btVector3& safeNormalize()
	{
		btScalar l2 = length2();
		//triNormal.normalize();
		if (l2 >= SIMD_EPSILON * SIMD_EPSILON)
		{
			(*this) /= btSqrt(l2);
		}
		else
		{
			setValue(1, 0, 0);
		}
		return *this;
	}

	/**@brief Normalize this vector
	 * x^2 + y^2 + z^2 = 1
	 * Asserts (in debug) that the vector is not fuzzy-zero. */
	SIMD_FORCE_INLINE btVector3& normalize()
	{
		btAssert(!fuzzyZero());

#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// dot product first
		__m128 vd = _mm_mul_ps(mVec128, mVec128);
		__m128 z = _mm_movehl_ps(vd, vd);
		__m128 y = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, y);
		vd = _mm_add_ss(vd, z);

#if 0
        vd = _mm_sqrt_ss(vd);
        vd = _mm_div_ss(v1110, vd);
        vd = bt_splat_ps(vd, 0x80);
        mVec128 = _mm_mul_ps(mVec128, vd);
#else

		// NR step 1/sqrt(x) - vd is x, y is output
		// _mm_rsqrt_ss is only an ~12-bit estimate; one Newton-Raphson
		// iteration (y1 = y0*(1.5 - 0.5*x*y0*y0)) refines it.
		y = _mm_rsqrt_ss(vd);  // estimate

		//  one step NR
		z = v1_5;
		vd = _mm_mul_ss(vd, vHalf);  // vd * 0.5
		//x2 = vd;
		vd = _mm_mul_ss(vd, y);  // vd * 0.5 * y0
		vd = _mm_mul_ss(vd, y);  // vd * 0.5 * y0 * y0
		z = _mm_sub_ss(z, vd);   // 1.5 - vd * 0.5 * y0 * y0

		y = _mm_mul_ss(y, z);  // y0 * (1.5 - vd * 0.5 * y0 * y0)

		// Broadcast the refined 1/length into x,y,z (w lane gets 0).
		y = bt_splat_ps(y, 0x80);
		mVec128 = _mm_mul_ps(mVec128, y);

#endif

		return *this;
#else
		return *this /= length();
#endif
	}

	/**@brief Return a normalized version of this vector */
	SIMD_FORCE_INLINE btVector3 normalized() const;

	/**@brief Return a rotated version of this vector
	 * @param wAxis The axis to rotate about
	 * @param angle The angle to rotate by */
	SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const;

	/**@brief Return the angle between this and another vector
	 * @param v The other vector (neither vector may be zero-length) */
	SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
	{
		btScalar s = btSqrt(length2() * v.length2());
		btFullAssert(s != btScalar(0.0));
		return btAcos(dot(v) / s);
	}

	/**@brief Return a vector with the absolute values of each element */
	SIMD_FORCE_INLINE btVector3 absolute() const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// Clear the sign bits of x,y,z (mask also zeroes w).
		return btVector3(_mm_and_ps(mVec128, btv3AbsfMask));
#elif defined(BT_USE_NEON)
		return btVector3(vabsq_f32(mVec128));
#else
		return btVector3(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]));
#endif
	}

	/**@brief Return the cross product between this and another vector
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// Classic one-shuffle-per-operand cross product:
		// (a.yzx * b - b.yzx * a).yzx == a x b
		__m128 T, V;

		T = bt_pshufd_ps(mVec128, BT_SHUFFLE(1, 2, 0, 3));    //	(Y Z X 0)
		V = bt_pshufd_ps(v.mVec128, BT_SHUFFLE(1, 2, 0, 3));  //	(Y Z X 0)

		V = _mm_mul_ps(V, mVec128);
		T = _mm_mul_ps(T, v.mVec128);
		V = _mm_sub_ps(V, T);

		V = bt_pshufd_ps(V, BT_SHUFFLE(1, 2, 0, 3));
		return btVector3(V);
#elif defined(BT_USE_NEON)
		float32x4_t T, V;
		// form (Y, Z, X, _) of mVec128 and v.mVec128
		float32x2_t Tlow = vget_low_f32(mVec128);
		float32x2_t Vlow = vget_low_f32(v.mVec128);
		T = vcombine_f32(vext_f32(Tlow, vget_high_f32(mVec128), 1), Tlow);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v.mVec128), 1), Vlow);

		V = vmulq_f32(V, mVec128);
		T = vmulq_f32(T, v.mVec128);
		V = vsubq_f32(V, T);
		Vlow = vget_low_f32(V);
		// form (Y, Z, X, _);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);
		// Mask out w so the result has a clean 0 in the fourth lane.
		V = (float32x4_t)vandq_s32((int32x4_t)V, btvFFF0Mask);

		return btVector3(V);
#else
		return btVector3(
			m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1],
			m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
			m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
#endif
	}

	/**@brief Return the scalar triple product: this . (v1 x v2) */
	SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// cross:
		__m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, BT_SHUFFLE(1, 2, 0, 3));  //	(Y Z X 0)
		__m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, BT_SHUFFLE(1, 2, 0, 3));  //	(Y Z X 0)

		V = _mm_mul_ps(V, v1.mVec128);
		T = _mm_mul_ps(T, v2.mVec128);
		V = _mm_sub_ps(V, T);

		V = _mm_shuffle_ps(V, V, BT_SHUFFLE(1, 2, 0, 3));

		// dot:
		V = _mm_mul_ps(V, mVec128);
		__m128 z = _mm_movehl_ps(V, V);
		__m128 y = _mm_shuffle_ps(V, V, 0x55);
		V = _mm_add_ss(V, y);
		V = _mm_add_ss(V, z);
		return _mm_cvtss_f32(V);

#elif defined(BT_USE_NEON)
		// cross:
		float32x4_t T, V;
		// form (Y, Z, X, _) of mVec128 and v.mVec128
		float32x2_t Tlow = vget_low_f32(v1.mVec128);
		float32x2_t Vlow = vget_low_f32(v2.mVec128);
		T = vcombine_f32(vext_f32(Tlow, vget_high_f32(v1.mVec128), 1), Tlow);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v2.mVec128), 1), Vlow);

		V = vmulq_f32(V, v1.mVec128);
		T = vmulq_f32(T, v2.mVec128);
		V = vsubq_f32(V, T);
		Vlow = vget_low_f32(V);
		// form (Y, Z, X, _);
		V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);

		// dot:
		V = vmulq_f32(mVec128, V);
		float32x2_t x = vpadd_f32(vget_low_f32(V), vget_low_f32(V));
		x = vadd_f32(x, vget_high_f32(V));
		return vget_lane_f32(x, 0);
#else
		return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) +
			   m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) +
			   m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
#endif
	}

	/**@brief Return the axis with the smallest value
	 * Note return values are 0,1,2 for x, y, or z */
	SIMD_FORCE_INLINE int minAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);
	}

	/**@brief Return the axis with the largest value
	 * Note return values are 0,1,2 for x, y, or z */
	SIMD_FORCE_INLINE int maxAxis() const
	{
		return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);
	}

	// Axis whose absolute component is smallest (i.e. least aligned with this vector).
	SIMD_FORCE_INLINE int furthestAxis() const
	{
		return absolute().minAxis();
	}

	// Axis whose absolute component is largest (i.e. most aligned with this vector).
	SIMD_FORCE_INLINE int closestAxis() const
	{
		return absolute().maxAxis();
	}

	/**@brief Set this vector to (1-rt)*v0 + rt*v1 */
	SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		__m128 vrt = _mm_load_ss(&rt);  //	(rt 0 0 0)
		btScalar s = btScalar(1.0) - rt;
		__m128 vs = _mm_load_ss(&s);  //	(S 0 0 0)
		vs = bt_pshufd_ps(vs, 0x80);  //	(S S S 0.0)
		__m128 r0 = _mm_mul_ps(v0.mVec128, vs);
		vrt = bt_pshufd_ps(vrt, 0x80);  //	(rt rt rt 0.0)
		__m128 r1 = _mm_mul_ps(v1.mVec128, vrt);
		__m128 tmp3 = _mm_add_ps(r0, r1);
		mVec128 = tmp3;
#elif defined(BT_USE_NEON)
		// NEON uses the algebraically equivalent v0 + rt*(v1 - v0) form.
		float32x4_t vl = vsubq_f32(v1.mVec128, v0.mVec128);
		vl = vmulq_n_f32(vl, rt);
		mVec128 = vaddq_f32(vl, v0.mVec128);
#else
		btScalar s = btScalar(1.0) - rt;
		m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0];
		m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1];
		m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2];
		//don't do the unused w component
		//		m_co[3] = s * v0[3] + rt * v1[3];
#endif
	}

	/**@brief Return the linear interpolation between this and another vector
	 * @param v The other vector
	 * @param t The ration of this to v (t = 0 => return this, t=1 => return other) */
	SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		__m128 vt = _mm_load_ss(&t);  //	(t 0 0 0)
		vt = bt_pshufd_ps(vt, 0x80);  //	(rt rt rt 0.0)
		__m128 vl = _mm_sub_ps(v.mVec128, mVec128);
		vl = _mm_mul_ps(vl, vt);
		vl = _mm_add_ps(vl, mVec128);

		return btVector3(vl);
#elif defined(BT_USE_NEON)
		float32x4_t vl = vsubq_f32(v.mVec128, mVec128);
		vl = vmulq_n_f32(vl, t);
		vl = vaddq_f32(vl, mVec128);

		return btVector3(vl);
#else
		return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
						 m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
						 m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
#endif
	}

	/**@brief Elementwise multiply this vector by the other
	 * @param v The other vector */
	SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = _mm_mul_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vmulq_f32(mVec128, v.mVec128);
#else
		m_floats[0] *= v.m_floats[0];
		m_floats[1] *= v.m_floats[1];
		m_floats[2] *= v.m_floats[2];
#endif
		return *this;
	}

	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; }
	/**@brief Set the x value */
	SIMD_FORCE_INLINE void setX(btScalar _x) { m_floats[0] = _x; };
	/**@brief Set the y value */
	SIMD_FORCE_INLINE void setY(btScalar _y) { m_floats[1] = _y; };
	/**@brief Set the z value */
	SIMD_FORCE_INLINE void setZ(btScalar _z) { m_floats[2] = _z; };
	/**@brief Set the w value */
	SIMD_FORCE_INLINE void setW(btScalar _w) { m_floats[3] = _w; };
	/**@brief Return the x value */
	SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; }
	/**@brief Return the y value */
	SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; }
	/**@brief Return the z value */
	SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; }
	/**@brief Return the w value */
	SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; }

	//SIMD_FORCE_INLINE btScalar&       operator[](int i)       { return (&m_floats[0])[i];	}
	//SIMD_FORCE_INLINE const btScalar& operator[](int i) const { return (&m_floats[0])[i]; }
	///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons.
	SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; }
	SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; }

	/**@brief Exact equality of all FOUR components (w included) */
	SIMD_FORCE_INLINE bool operator==(const btVector3& other) const
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// movemask gathers the sign bit of each compare lane; 0xf means all equal.
		return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
#else
		return ((m_floats[3] == other.m_floats[3]) &&
				(m_floats[2] == other.m_floats[2]) &&
				(m_floats[1] == other.m_floats[1]) &&
				(m_floats[0] == other.m_floats[0]));
#endif
	}

	SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const
	{
		return !(*this == other);
	}

	/**@brief Set each element to the max of the current values and the values of another btVector3
	 * @param other The other btVector3 to compare with
	 * Note: the w component participates as well. */
	SIMD_FORCE_INLINE void setMax(const btVector3& other)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = _mm_max_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vmaxq_f32(mVec128, other.mVec128);
#else
		btSetMax(m_floats[0], other.m_floats[0]);
		btSetMax(m_floats[1], other.m_floats[1]);
		btSetMax(m_floats[2], other.m_floats[2]);
		btSetMax(m_floats[3], other.w());
#endif
	}

	/**@brief Set each element to the min of the current values and the values of another btVector3
	 * @param other The other btVector3 to compare with
	 * Note: the w component participates as well. */
	SIMD_FORCE_INLINE void setMin(const btVector3& other)
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = _mm_min_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vminq_f32(mVec128, other.mVec128);
#else
		btSetMin(m_floats[0], other.m_floats[0]);
		btSetMin(m_floats[1], other.m_floats[1]);
		btSetMin(m_floats[2], other.m_floats[2]);
		btSetMin(m_floats[3], other.w());
#endif
	}

	/**@brief Set x, y, z and reset w to 0 */
	SIMD_FORCE_INLINE void setValue(const btScalar& _x, const btScalar& _y, const btScalar& _z)
	{
		m_floats[0] = _x;
		m_floats[1] = _y;
		m_floats[2] = _z;
		m_floats[3] = btScalar(0.f);
	}

	/**@brief Write the rows of the skew-symmetric (cross-product) matrix of this
	 * vector into v0, v1, v2, so that (matrix * u) == (this cross u). */
	void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		__m128 V = _mm_and_ps(mVec128, btvFFF0fMask);  // zero out w
		__m128 V0 = _mm_xor_ps(btvMzeroMask, V);       // negate all lanes
		__m128 V2 = _mm_movelh_ps(V0, V);

		__m128 V1 = _mm_shuffle_ps(V, V0, 0xCE);

		V0 = _mm_shuffle_ps(V0, V, 0xDB);
		V2 = _mm_shuffle_ps(V2, V, 0xF9);

		v0->mVec128 = V0;
		v1->mVec128 = V1;
		v2->mVec128 = V2;
#else
		v0->setValue(0., -z(), y());
		v1->setValue(z(), 0., -x());
		v2->setValue(-y(), x(), 0.);
#endif
	}

	/**@brief Zero all four components */
	void setZero()
	{
#if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		mVec128 = (__m128)_mm_xor_ps(mVec128, mVec128);  // x ^ x == 0
#elif defined(BT_USE_NEON)
		int32x4_t vi = vdupq_n_s32(0);
		mVec128 = vreinterpretq_f32_s32(vi);
#else
		setValue(btScalar(0.), btScalar(0.), btScalar(0.));
#endif
	}

	/**@brief True iff x, y and z are exactly zero (w is ignored) */
	SIMD_FORCE_INLINE bool isZero() const
	{
		return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0);
	}

	/**@brief True iff the squared length is below SIMD_EPSILON^2 */
	SIMD_FORCE_INLINE bool fuzzyZero() const
	{
		return length2() < SIMD_EPSILON * SIMD_EPSILON;
	}

	// Serialization to/from the float/double POD mirror structs
	// (btVector3Data resolves per BT_USE_DOUBLE_PRECISION).
	SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const;

	SIMD_FORCE_INLINE void deSerialize(const struct btVector3DoubleData& dataIn);

	SIMD_FORCE_INLINE void deSerialize(const struct btVector3FloatData& dataIn);

	SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const;

	SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn);

	SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const;

	SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn);

	/**@brief returns index of maximum dot product between this and vectors in array[]
	 * @param array The other vectors
	 * @param array_count The number of other vectors
	 * @param dotOut The maximum dot product */
	SIMD_FORCE_INLINE long maxDot(const btVector3* array, long array_count, btScalar& dotOut) const;

	/**@brief returns index of minimum dot product between this and vectors in array[]
	 * @param array The other vectors
	 * @param array_count The number of other vectors
	 * @param dotOut The minimum dot product */
	SIMD_FORCE_INLINE long minDot(const btVector3* array, long array_count, btScalar& dotOut) const;

	/* create a vector as  btVector3( this->dot( btVector3 v0 ), this->dot( btVector3 v1), this->dot( btVector3 v2 ))  */
	SIMD_FORCE_INLINE btVector3 dot3(const btVector3& v0, const btVector3& v1, const btVector3& v2) const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		// Three dot products computed with a transpose-style reduction
		// instead of three independent horizontal adds.
		__m128 a0 = _mm_mul_ps(v0.mVec128, this->mVec128);
		__m128 a1 = _mm_mul_ps(v1.mVec128, this->mVec128);
		__m128 a2 = _mm_mul_ps(v2.mVec128, this->mVec128);
		__m128 b0 = _mm_unpacklo_ps(a0, a1);
		__m128 b1 = _mm_unpackhi_ps(a0, a1);
		__m128 b2 = _mm_unpacklo_ps(a2, _mm_setzero_ps());
		__m128 r = _mm_movelh_ps(b0, b2);
		r = _mm_add_ps(r, _mm_movehl_ps(b2, b0));
		a2 = _mm_and_ps(a2, btvxyzMaskf);  // drop the w-lane product
		r = _mm_add_ps(r, btCastdTo128f(_mm_move_sd(btCastfTo128d(a2), btCastfTo128d(b1))));
		return btVector3(r);

#elif defined(BT_USE_NEON)
		static const uint32x4_t xyzMask = (const uint32x4_t){static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), 0};
		float32x4_t a0 = vmulq_f32(v0.mVec128, this->mVec128);
		float32x4_t a1 = vmulq_f32(v1.mVec128, this->mVec128);
		float32x4_t a2 = vmulq_f32(v2.mVec128, this->mVec128);
		float32x2x2_t zLo = vtrn_f32(vget_high_f32(a0), vget_high_f32(a1));
		a2 = (float32x4_t)vandq_u32((uint32x4_t)a2, xyzMask);
		float32x2_t b0 = vadd_f32(vpadd_f32(vget_low_f32(a0), vget_low_f32(a1)), zLo.val[0]);
		float32x2_t b1 = vpadd_f32(vpadd_f32(vget_low_f32(a2), vget_high_f32(a2)), vdup_n_f32(0.0f));
		return btVector3(vcombine_f32(b0, b1));
#else
		return btVector3(dot(v0), dot(v1), dot(v2));
#endif
	}
};
  648. /**@brief Return the sum of two vectors (Point symantics)*/
  649. SIMD_FORCE_INLINE btVector3
  650. operator+(const btVector3& v1, const btVector3& v2)
  651. {
  652. #if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
  653. return btVector3(_mm_add_ps(v1.mVec128, v2.mVec128));
  654. #elif defined(BT_USE_NEON)
  655. return btVector3(vaddq_f32(v1.mVec128, v2.mVec128));
  656. #else
  657. return btVector3(
  658. v1.m_floats[0] + v2.m_floats[0],
  659. v1.m_floats[1] + v2.m_floats[1],
  660. v1.m_floats[2] + v2.m_floats[2]);
  661. #endif
  662. }
  663. /**@brief Return the elementwise product of two vectors */
  664. SIMD_FORCE_INLINE btVector3
  665. operator*(const btVector3& v1, const btVector3& v2)
  666. {
  667. #if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
  668. return btVector3(_mm_mul_ps(v1.mVec128, v2.mVec128));
  669. #elif defined(BT_USE_NEON)
  670. return btVector3(vmulq_f32(v1.mVec128, v2.mVec128));
  671. #else
  672. return btVector3(
  673. v1.m_floats[0] * v2.m_floats[0],
  674. v1.m_floats[1] * v2.m_floats[1],
  675. v1.m_floats[2] * v2.m_floats[2]);
  676. #endif
  677. }
  678. /**@brief Return the difference between two vectors */
  679. SIMD_FORCE_INLINE btVector3
  680. operator-(const btVector3& v1, const btVector3& v2)
  681. {
  682. #if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
  683. // without _mm_and_ps this code causes slowdown in Concave moving
  684. __m128 r = _mm_sub_ps(v1.mVec128, v2.mVec128);
  685. return btVector3(_mm_and_ps(r, btvFFF0fMask));
  686. #elif defined(BT_USE_NEON)
  687. float32x4_t r = vsubq_f32(v1.mVec128, v2.mVec128);
  688. return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
  689. #else
  690. return btVector3(
  691. v1.m_floats[0] - v2.m_floats[0],
  692. v1.m_floats[1] - v2.m_floats[1],
  693. v1.m_floats[2] - v2.m_floats[2]);
  694. #endif
  695. }
  696. /**@brief Return the negative of the vector */
  697. SIMD_FORCE_INLINE btVector3
  698. operator-(const btVector3& v)
  699. {
  700. #if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
  701. __m128 r = _mm_xor_ps(v.mVec128, btvMzeroMask);
  702. return btVector3(_mm_and_ps(r, btvFFF0fMask));
  703. #elif defined(BT_USE_NEON)
  704. return btVector3((btSimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)btvMzeroMask));
  705. #else
  706. return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]);
  707. #endif
  708. }
  709. /**@brief Return the vector scaled by s */
  710. SIMD_FORCE_INLINE btVector3
  711. operator*(const btVector3& v, const btScalar& s)
  712. {
  713. #if defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
  714. __m128 vs = _mm_load_ss(&s); // (S 0 0 0)
  715. vs = bt_pshufd_ps(vs, 0x80); // (S S S 0.0)
  716. return btVector3(_mm_mul_ps(v.mVec128, vs));
  717. #elif defined(BT_USE_NEON)
  718. float32x4_t r = vmulq_n_f32(v.mVec128, s);
  719. return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));
  720. #else
  721. return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
  722. #endif
  723. }
  724. /**@brief Return the vector scaled by s */
  725. SIMD_FORCE_INLINE btVector3
  726. operator*(const btScalar& s, const btVector3& v)
  727. {
  728. return v * s;
  729. }
  730. /**@brief Return the vector inversely scaled by s */
  731. SIMD_FORCE_INLINE btVector3
  732. operator/(const btVector3& v, const btScalar& s)
  733. {
  734. btFullAssert(s != btScalar(0.0));
  735. #if 0 //defined(BT_USE_SSE_IN_API)
  736. // this code is not faster !
  737. __m128 vs = _mm_load_ss(&s);
  738. vs = _mm_div_ss(v1110, vs);
  739. vs = bt_pshufd_ps(vs, 0x00); // (S S S S)
  740. return btVector3(_mm_mul_ps(v.mVec128, vs));
  741. #else
  742. return v * (btScalar(1.0) / s);
  743. #endif
  744. }
/**@brief Return the elementwise quotient of two vectors (v1.x/v2.x, v1.y/v2.y, v1.z/v2.z) */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v1, const btVector3& v2)
{
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
	__m128 vec = _mm_div_ps(v1.mVec128, v2.mVec128);
	vec = _mm_and_ps(vec, btvFFF0fMask);  // clear w (0/0 would produce NaN there)
	return btVector3(vec);
#elif defined(BT_USE_NEON)
	// NEON has no divide instruction: use a reciprocal estimate refined by
	// two Newton-Raphson steps, then multiply by the numerator.
	float32x4_t x, y, v, m;
	x = v1.mVec128;
	y = v2.mVec128;
	v = vrecpeq_f32(y);     // v ~ 1/y
	m = vrecpsq_f32(y, v);  // m = (2-v*y)
	v = vmulq_f32(v, m);    // vv = v*m ~~ 1/y
	m = vrecpsq_f32(y, v);  // mm = (2-vv*y)
	v = vmulq_f32(v, x);    // x*vv
	v = vmulq_f32(v, m);    // (x*vv)*(2-vv*y) = x*(vv(2-vv*y)) ~~~ x/y
	// NOTE(review): unlike the SSE path, the w lane is not masked to zero
	// here - confirm callers never rely on w after an elementwise divide.
	return btVector3(v);
#else
	return btVector3(
		v1.m_floats[0] / v2.m_floats[0],
		v1.m_floats[1] / v2.m_floats[1],
		v1.m_floats[2] / v2.m_floats[2]);
#endif
}
  771. /**@brief Return the dot product between two vectors */
  772. SIMD_FORCE_INLINE btScalar
  773. btDot(const btVector3& v1, const btVector3& v2)
  774. {
  775. return v1.dot(v2);
  776. }
  777. /**@brief Return the distance squared between two vectors */
  778. SIMD_FORCE_INLINE btScalar
  779. btDistance2(const btVector3& v1, const btVector3& v2)
  780. {
  781. return v1.distance2(v2);
  782. }
  783. /**@brief Return the distance between two vectors */
  784. SIMD_FORCE_INLINE btScalar
  785. btDistance(const btVector3& v1, const btVector3& v2)
  786. {
  787. return v1.distance(v2);
  788. }
  789. /**@brief Return the angle between two vectors */
  790. SIMD_FORCE_INLINE btScalar
  791. btAngle(const btVector3& v1, const btVector3& v2)
  792. {
  793. return v1.angle(v2);
  794. }
  795. /**@brief Return the cross product of two vectors */
  796. SIMD_FORCE_INLINE btVector3
  797. btCross(const btVector3& v1, const btVector3& v2)
  798. {
  799. return v1.cross(v2);
  800. }
  801. SIMD_FORCE_INLINE btScalar
  802. btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
  803. {
  804. return v1.triple(v2, v3);
  805. }
  806. /**@brief Return the linear interpolation between two vectors
  807. * @param v1 One vector
  808. * @param v2 The other vector
  809. * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */
  810. SIMD_FORCE_INLINE btVector3
  811. lerp(const btVector3& v1, const btVector3& v2, const btScalar& t)
  812. {
  813. return v1.lerp(v2, t);
  814. }
  815. SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
  816. {
  817. return (v - *this).length2();
  818. }
  819. SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
  820. {
  821. return (v - *this).length();
  822. }
  823. SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
  824. {
  825. btVector3 nrm = *this;
  826. return nrm.normalize();
  827. }
/**@brief Return this vector rotated about wAxis by _angle.
 * Rodrigues-style decomposition: the component o along wAxis is unchanged;
 * the perpendicular remainder is rotated in the plane it spans with
 * wAxis x *this.
 * @param wAxis The rotation axis; must be a unit length vector
 * @param _angle The rotation angle, in radians */
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar _angle) const
{
	// wAxis must be a unit length vector
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
	__m128 O = _mm_mul_ps(wAxis.mVec128, mVec128);
	btScalar ssin = btSin(_angle);
	__m128 C = wAxis.cross(mVec128).mVec128;
	O = _mm_and_ps(O, btvFFF0fMask);
	btScalar scos = btCos(_angle);
	__m128 vsin = _mm_load_ss(&ssin);  // (S 0 0 0)
	__m128 vcos = _mm_load_ss(&scos);  // (S 0 0 0)
	__m128 Y = bt_pshufd_ps(O, 0xC9);  // (Y Z X 0)
	__m128 Z = bt_pshufd_ps(O, 0xD2);  // (Z X Y 0)
	O = _mm_add_ps(O, Y);
	vsin = bt_pshufd_ps(vsin, 0x80);  // (S S S 0)
	O = _mm_add_ps(O, Z);             // every xyz lane now holds dot(wAxis, *this)
	vcos = bt_pshufd_ps(vcos, 0x80);  // (S S S 0)
	vsin = vsin * C;
	O = O * wAxis.mVec128;   // o = wAxis * dot(wAxis, *this): projection onto the axis
	__m128 X = mVec128 - O;  // perpendicular component of *this
	O = O + vsin;
	vcos = vcos * X;
	O = O + vcos;
	return btVector3(O);
#else
	btVector3 o = wAxis * wAxis.dot(*this);  // projection onto the axis (invariant part)
	btVector3 _x = *this - o;                // perpendicular component
	btVector3 _y;
	_y = wAxis.cross(*this);
	return (o + _x * btCos(_angle) + _y * btSin(_angle));
#endif
}
  860. SIMD_FORCE_INLINE long btVector3::maxDot(const btVector3* array, long array_count, btScalar& dotOut) const
  861. {
  862. #if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
  863. #if defined _WIN32 || defined(BT_USE_SSE)
  864. const long scalar_cutoff = 10;
  865. long _maxdot_large(const float* array, const float* vec, unsigned long array_count, float* dotOut);
  866. #elif defined BT_USE_NEON
  867. const long scalar_cutoff = 4;
  868. extern long (*_maxdot_large)(const float* array, const float* vec, unsigned long array_count, float* dotOut);
  869. #endif
  870. if (array_count < scalar_cutoff)
  871. #endif
  872. {
  873. btScalar maxDot1 = -SIMD_INFINITY;
  874. int i = 0;
  875. int ptIndex = -1;
  876. for (i = 0; i < array_count; i++)
  877. {
  878. btScalar dot = array[i].dot(*this);
  879. if (dot > maxDot1)
  880. {
  881. maxDot1 = dot;
  882. ptIndex = i;
  883. }
  884. }
  885. dotOut = maxDot1;
  886. return ptIndex;
  887. }
  888. #if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
  889. return _maxdot_large((float*)array, (float*)&m_floats[0], array_count, &dotOut);
  890. #endif
  891. }
  892. SIMD_FORCE_INLINE long btVector3::minDot(const btVector3* array, long array_count, btScalar& dotOut) const
  893. {
  894. #if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
  895. #if defined BT_USE_SSE
  896. const long scalar_cutoff = 10;
  897. long _mindot_large(const float* array, const float* vec, unsigned long array_count, float* dotOut);
  898. #elif defined BT_USE_NEON
  899. const long scalar_cutoff = 4;
  900. extern long (*_mindot_large)(const float* array, const float* vec, unsigned long array_count, float* dotOut);
  901. #else
  902. #error unhandled arch!
  903. #endif
  904. if (array_count < scalar_cutoff)
  905. #endif
  906. {
  907. btScalar minDot = SIMD_INFINITY;
  908. int i = 0;
  909. int ptIndex = -1;
  910. for (i = 0; i < array_count; i++)
  911. {
  912. btScalar dot = array[i].dot(*this);
  913. if (dot < minDot)
  914. {
  915. minDot = dot;
  916. ptIndex = i;
  917. }
  918. }
  919. dotOut = minDot;
  920. return ptIndex;
  921. }
  922. #if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined(BT_USE_NEON)
  923. return _mindot_large((float*)array, (float*)&m_floats[0], array_count, &dotOut);
  924. #endif //BT_USE_SIMD_VECTOR3
  925. }
/**@brief btVector4 is a 4-component vector sharing btVector3's 16-byte
 * storage; it additionally exposes the w component. */
class btVector4 : public btVector3
{
public:
	/**@brief No-initialization constructor */
	SIMD_FORCE_INLINE btVector4() {}

	/**@brief Construct from four scalars */
	SIMD_FORCE_INLINE btVector4(const btScalar& _x, const btScalar& _y, const btScalar& _z, const btScalar& _w)
		: btVector3(_x, _y, _z)
	{
		m_floats[3] = _w;
	}

#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)) || defined(BT_USE_NEON)
	/**@brief Construct directly from a 128-bit SIMD register */
	SIMD_FORCE_INLINE btVector4(const btSimdFloat4 vec)
	{
		mVec128 = vec;
	}

	/**@brief Copy the full register (all four lanes) from a btVector3 */
	SIMD_FORCE_INLINE btVector4(const btVector3& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	SIMD_FORCE_INLINE btVector4&
	operator=(const btVector4& v)
	{
		mVec128 = v.mVec128;
		return *this;
	}
#endif  // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

	/**@brief Return a vector holding the absolute value of all four components
	 * (unlike btVector3::absolute(), w is included) */
	SIMD_FORCE_INLINE btVector4 absolute4() const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
		return btVector4(_mm_and_ps(mVec128, btvAbsfMask));  // clear sign bits
#elif defined(BT_USE_NEON)
		return btVector4(vabsq_f32(mVec128));
#else
		return btVector4(
			btFabs(m_floats[0]),
			btFabs(m_floats[1]),
			btFabs(m_floats[2]),
			btFabs(m_floats[3]));
#endif
	}

	/**@brief Return the w component */
	btScalar getW() const { return m_floats[3]; }

	/**@brief Return the axis (0..3) with the largest value,
	 * or -1 if no component exceeds -BT_LARGE_FLOAT */
	SIMD_FORCE_INLINE int maxAxis4() const
	{
		int maxIndex = -1;
		btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
		if (m_floats[0] > maxVal)
		{
			maxIndex = 0;
			maxVal = m_floats[0];
		}
		if (m_floats[1] > maxVal)
		{
			maxIndex = 1;
			maxVal = m_floats[1];
		}
		if (m_floats[2] > maxVal)
		{
			maxIndex = 2;
			maxVal = m_floats[2];
		}
		if (m_floats[3] > maxVal)
		{
			maxIndex = 3;
		}
		return maxIndex;
	}

	/**@brief Return the axis (0..3) with the smallest value,
	 * or -1 if no component is below BT_LARGE_FLOAT */
	SIMD_FORCE_INLINE int minAxis4() const
	{
		int minIndex = -1;
		btScalar minVal = btScalar(BT_LARGE_FLOAT);
		if (m_floats[0] < minVal)
		{
			minIndex = 0;
			minVal = m_floats[0];
		}
		if (m_floats[1] < minVal)
		{
			minIndex = 1;
			minVal = m_floats[1];
		}
		if (m_floats[2] < minVal)
		{
			minIndex = 2;
			minVal = m_floats[2];
		}
		if (m_floats[3] < minVal)
		{
			minIndex = 3;
		}
		return minIndex;
	}

	/**@brief Return the axis with the largest absolute value */
	SIMD_FORCE_INLINE int closestAxis4() const
	{
		return absolute4().maxAxis4();
	}

	/* void getValue(btScalar *m) const
	{
		m[0] = m_floats[0];
		m[1] = m_floats[1];
		m[2] =m_floats[2];
	}
*/
	/**@brief Set all four values
	 * @param x Value of x
	 * @param y Value of y
	 * @param z Value of z
	 * @param w Value of w
	 */
	SIMD_FORCE_INLINE void setValue(const btScalar& _x, const btScalar& _y, const btScalar& _z, const btScalar& _w)
	{
		m_floats[0] = _x;
		m_floats[1] = _y;
		m_floats[2] = _z;
		m_floats[3] = _w;
	}
};
  1046. ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
  1047. SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
  1048. {
  1049. #ifdef BT_USE_DOUBLE_PRECISION
  1050. unsigned char* dest = (unsigned char*)&destVal;
  1051. const unsigned char* src = (const unsigned char*)&sourceVal;
  1052. dest[0] = src[7];
  1053. dest[1] = src[6];
  1054. dest[2] = src[5];
  1055. dest[3] = src[4];
  1056. dest[4] = src[3];
  1057. dest[5] = src[2];
  1058. dest[6] = src[1];
  1059. dest[7] = src[0];
  1060. #else
  1061. unsigned char* dest = (unsigned char*)&destVal;
  1062. const unsigned char* src = (const unsigned char*)&sourceVal;
  1063. dest[0] = src[3];
  1064. dest[1] = src[2];
  1065. dest[2] = src[1];
  1066. dest[3] = src[0];
  1067. #endif //BT_USE_DOUBLE_PRECISION
  1068. }
  1069. ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
  1070. SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
  1071. {
  1072. for (int i = 0; i < 4; i++)
  1073. {
  1074. btSwapScalarEndian(sourceVec[i], destVec[i]);
  1075. }
  1076. }
  1077. ///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
  1078. SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
  1079. {
  1080. btVector3 swappedVec;
  1081. for (int i = 0; i < 4; i++)
  1082. {
  1083. btSwapScalarEndian(vector[i], swappedVec[i]);
  1084. }
  1085. vector = swappedVec;
  1086. }
  1087. template <class T>
  1088. SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q)
  1089. {
  1090. if (btFabs(n[2]) > SIMDSQRT12)
  1091. {
  1092. // choose p in y-z plane
  1093. btScalar a = n[1] * n[1] + n[2] * n[2];
  1094. btScalar k = btRecipSqrt(a);
  1095. p[0] = 0;
  1096. p[1] = -n[2] * k;
  1097. p[2] = n[1] * k;
  1098. // set q = n x p
  1099. q[0] = a * k;
  1100. q[1] = -n[0] * p[2];
  1101. q[2] = n[0] * p[1];
  1102. }
  1103. else
  1104. {
  1105. // choose p in x-y plane
  1106. btScalar a = n[0] * n[0] + n[1] * n[1];
  1107. btScalar k = btRecipSqrt(a);
  1108. p[0] = -n[1] * k;
  1109. p[1] = n[0] * k;
  1110. p[2] = 0;
  1111. // set q = n x p
  1112. q[0] = -n[2] * p[1];
  1113. q[1] = n[2] * p[0];
  1114. q[2] = a * k;
  1115. }
  1116. }
///Plain-old-data mirror of btVector3 used for single-precision serialization.
struct btVector3FloatData
{
	float m_floats[4];
};
///Plain-old-data mirror of btVector3 used for double-precision serialization.
struct btVector3DoubleData
{
	double m_floats[4];
};
  1125. SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const
  1126. {
  1127. ///could also do a memcpy, check if it is worth it
  1128. for (int i = 0; i < 4; i++)
  1129. dataOut.m_floats[i] = float(m_floats[i]);
  1130. }
  1131. SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
  1132. {
  1133. for (int i = 0; i < 4; i++)
  1134. m_floats[i] = btScalar(dataIn.m_floats[i]);
  1135. }
  1136. SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const
  1137. {
  1138. ///could also do a memcpy, check if it is worth it
  1139. for (int i = 0; i < 4; i++)
  1140. dataOut.m_floats[i] = double(m_floats[i]);
  1141. }
  1142. SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
  1143. {
  1144. for (int i = 0; i < 4; i++)
  1145. m_floats[i] = btScalar(dataIn.m_floats[i]);
  1146. }
  1147. SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const
  1148. {
  1149. ///could also do a memcpy, check if it is worth it
  1150. for (int i = 0; i < 4; i++)
  1151. dataOut.m_floats[i] = m_floats[i];
  1152. }
  1153. SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3FloatData& dataIn)
  1154. {
  1155. for (int i = 0; i < 4; i++)
  1156. m_floats[i] = (btScalar)dataIn.m_floats[i];
  1157. }
  1158. SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3DoubleData& dataIn)
  1159. {
  1160. for (int i = 0; i < 4; i++)
  1161. m_floats[i] = (btScalar)dataIn.m_floats[i];
  1162. }
  1163. #endif //BT_VECTOR3_H