/* Cell BEA specific SPU intrinsics to PPU/VMX intrinsics
   Copyright (C) 2007-2015 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3 of the License, or (at your option)
   any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _SI2VMX_H_
#define _SI2VMX_H_ 1

#ifndef __SPU__

#include <stdlib.h>
#include <vec_types.h>

/* Specify a default halt action for spu_hcmpeq and spu_hcmpgt intrinsics.
 * Users can override the action by defining it prior to including this
 * header file.
 */
#ifndef SPU_HALT_ACTION
#define SPU_HALT_ACTION abort()
#endif

/* Specify a default stop action for the spu_stop intrinsic.
 * Users can override the action by defining it prior to including this
 * header file.
 */
#ifndef SPU_STOP_ACTION
#define SPU_STOP_ACTION abort()
#endif

/* Specify a default action for unsupported intrinsics.
 * Users can override the action by defining it prior to including this
 * header file.
 */
#ifndef SPU_UNSUPPORTED_ACTION
#define SPU_UNSUPPORTED_ACTION abort()
#endif
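
/* For example, a caller could install a custom handler (my_halt_handler
 * here is hypothetical, not part of this header) by defining the action
 * before including this file:
 *
 *   #define SPU_HALT_ACTION my_halt_handler()
 *   #include <si2vmx.h>
 */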

/* Casting intrinsics - from scalar to quadword
 */

static __inline qword si_from_uchar(unsigned char c) {
  union {
    qword q;
    unsigned char c[16];
  } x;
  x.c[3] = c;
  return (x.q);
}

static __inline qword si_from_char(signed char c) {
  union {
    qword q;
    signed char c[16];
  } x;
  x.c[3] = c;
  return (x.q);
}

static __inline qword si_from_ushort(unsigned short s) {
  union {
    qword q;
    unsigned short s[8];
  } x;
  x.s[1] = s;
  return (x.q);
}

static __inline qword si_from_short(short s) {
  union {
    qword q;
    short s[8];
  } x;
  x.s[1] = s;
  return (x.q);
}

static __inline qword si_from_uint(unsigned int i) {
  union {
    qword q;
    unsigned int i[4];
  } x;
  x.i[0] = i;
  return (x.q);
}

static __inline qword si_from_int(int i) {
  union {
    qword q;
    int i[4];
  } x;
  x.i[0] = i;
  return (x.q);
}

static __inline qword si_from_ullong(unsigned long long l) {
  union {
    qword q;
    unsigned long long l[2];
  } x;
  x.l[0] = l;
  return (x.q);
}

static __inline qword si_from_llong(long long l) {
  union {
    qword q;
    long long l[2];
  } x;
  x.l[0] = l;
  return (x.q);
}

static __inline qword si_from_float(float f) {
  union {
    qword q;
    float f[4];
  } x;
  x.f[0] = f;
  return (x.q);
}

static __inline qword si_from_double(double d) {
  union {
    qword q;
    double d[2];
  } x;
  x.d[0] = d;
  return (x.q);
}

static __inline qword si_from_ptr(void *ptr) {
  union {
    qword q;
    void *p;
  } x;
  x.p = ptr;
  return (x.q);
}

/* Casting intrinsics - from quadword to scalar
 */
static __inline unsigned char si_to_uchar(qword q) {
  union {
    qword q;
    unsigned char c[16];
  } x;
  x.q = q;
  return (x.c[3]);
}

static __inline signed char si_to_char(qword q) {
  union {
    qword q;
    signed char c[16];
  } x;
  x.q = q;
  return (x.c[3]);
}

static __inline unsigned short si_to_ushort(qword q) {
  union {
    qword q;
    unsigned short s[8];
  } x;
  x.q = q;
  return (x.s[1]);
}

static __inline short si_to_short(qword q) {
  union {
    qword q;
    short s[8];
  } x;
  x.q = q;
  return (x.s[1]);
}

static __inline unsigned int si_to_uint(qword q) {
  union {
    qword q;
    unsigned int i[4];
  } x;
  x.q = q;
  return (x.i[0]);
}

static __inline int si_to_int(qword q) {
  union {
    qword q;
    int i[4];
  } x;
  x.q = q;
  return (x.i[0]);
}

static __inline unsigned long long si_to_ullong(qword q) {
  union {
    qword q;
    unsigned long long l[2];
  } x;
  x.q = q;
  return (x.l[0]);
}

static __inline long long si_to_llong(qword q) {
  union {
    qword q;
    long long l[2];
  } x;
  x.q = q;
  return (x.l[0]);
}

static __inline float si_to_float(qword q) {
  union {
    qword q;
    float f[4];
  } x;
  x.q = q;
  return (x.f[0]);
}

static __inline double si_to_double(qword q) {
  union {
    qword q;
    double d[2];
  } x;
  x.q = q;
  return (x.d[0]);
}

static __inline void * si_to_ptr(qword q) {
  union {
    qword q;
    void *p;
  } x;
  x.q = q;
  return (x.p);
}
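
/* Illustrative example: scalars travel in the SPU "preferred slot" of the
 * quadword (byte element 3, halfword element 1, word/doubleword element 0
 * above), so a from/to pair round-trips a value:
 *
 *   int x = si_to_int(si_from_int(42));   => x == 42
 */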

/* Absolute difference
 */
static __inline qword si_absdb(qword a, qword b)
{
  vec_uchar16 ac, bc, dc;

  ac = (vec_uchar16)(a);
  bc = (vec_uchar16)(b);
  dc = vec_sel(vec_sub(bc, ac), vec_sub(ac, bc), vec_cmpgt(ac, bc));
  return ((qword)(dc));
}

/* Add intrinsics
 */
#define si_a(_a, _b)  ((qword)(vec_add((vec_uint4)(_a), (vec_uint4)(_b))))

#define si_ah(_a, _b) ((qword)(vec_add((vec_ushort8)(_a), (vec_ushort8)(_b))))

static __inline qword si_ai(qword a, int b)
{
  return ((qword)(vec_add((vec_int4)(a),
                          vec_splat((vec_int4)(si_from_int(b)), 0))));
}

static __inline qword si_ahi(qword a, short b)
{
  return ((qword)(vec_add((vec_short8)(a),
                          vec_splat((vec_short8)(si_from_short(b)), 1))));
}

#define si_fa(_a, _b) ((qword)(vec_add((vec_float4)(_a), (vec_float4)(_b))))

static __inline qword si_dfa(qword a, qword b)
{
  union {
    vec_double2 v;
    double d[2];
  } ad, bd, dd;

  ad.v = (vec_double2)(a);
  bd.v = (vec_double2)(b);
  dd.d[0] = ad.d[0] + bd.d[0];
  dd.d[1] = ad.d[1] + bd.d[1];
  return ((qword)(dd.v));
}

/* Add word extended
 */
#define si_addx(_a, _b, _c) ((qword)(vec_add(vec_add((vec_uint4)(_a), (vec_uint4)(_b)), \
                                             vec_and((vec_uint4)(_c), vec_splat_u32(1)))))

/* Bit-wise AND
 */
#define si_and(_a, _b) ((qword)(vec_and((vec_uint4)(_a), (vec_uint4)(_b))))

static __inline qword si_andbi(qword a, signed char b)
{
  return ((qword)(vec_and((vec_char16)(a),
                          vec_splat((vec_char16)(si_from_char(b)), 3))));
}

static __inline qword si_andhi(qword a, signed short b)
{
  return ((qword)(vec_and((vec_short8)(a),
                          vec_splat((vec_short8)(si_from_short(b)), 1))));
}

static __inline qword si_andi(qword a, signed int b)
{
  return ((qword)(vec_and((vec_int4)(a),
                          vec_splat((vec_int4)(si_from_int(b)), 0))));
}

/* Bit-wise AND with complement
 */
#define si_andc(_a, _b) ((qword)(vec_andc((vec_uchar16)(_a), (vec_uchar16)(_b))))

/* Average byte vectors
 */
#define si_avgb(_a, _b) ((qword)(vec_avg((vec_uchar16)(_a), (vec_uchar16)(_b))))

/* Branch indirect and set link on external data
 */
#define si_bisled(_func)  /* not mappable */
#define si_bisledd(_func) /* not mappable */
#define si_bislede(_func) /* not mappable */

/* Borrow generate
 */
#define si_bg(_a, _b) ((qword)(vec_subc((vec_uint4)(_b), (vec_uint4)(_a))))

#define si_bgx(_a, _b, _c) ((qword)(vec_and(vec_or(vec_cmpgt((vec_uint4)(_b), (vec_uint4)(_a)), \
                                                   vec_and(vec_cmpeq((vec_uint4)(_b), (vec_uint4)(_a)), \
                                                           (vec_uint4)(_c))), vec_splat_u32(1))))

/* Compare absolute equal
 */
static __inline qword si_fcmeq(qword a, qword b)
{
  vec_float4 msb = (vec_float4)((vec_uint4){0x80000000, 0x80000000, 0x80000000, 0x80000000});

  return ((qword)(vec_cmpeq(vec_andc((vec_float4)(a), msb),
                            vec_andc((vec_float4)(b), msb))));
}

static __inline qword si_dfcmeq(qword a, qword b)
{
  vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
  vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000 };
  vec_uchar16 hihi_promote = (vec_uchar16) { 0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27};

  vec_uint4 biteq;
  vec_uint4 aabs;
  vec_uint4 babs;
  vec_uint4 a_gt;
  vec_uint4 ahi_inf;
  vec_uint4 anan;
  vec_uint4 result;

  union {
    vec_uchar16 v;
    int i[4];
  } x;

  /* Shift 4 bytes */
  x.i[3] = 4 << 3;

  /* Mask out sign bits */
  aabs = vec_and((vec_uint4)a, sign_mask);
  babs = vec_and((vec_uint4)b, sign_mask);

  /* A) Check for bit equality, store in high word */
  biteq = (vec_uint4) vec_cmpeq((vec_uint4)aabs, (vec_uint4)babs);
  biteq = vec_and(biteq, (vec_uint4)vec_slo((vec_uchar16)biteq, x.v));

  /*
     B) Check if a is NaN, store in high word
        B1) If the high word is greater than max_exp (indicates a NaN)
        B2) If the low word is greater than 0
  */
  a_gt = (vec_uint4)vec_cmpgt(aabs, nan_mask);

  /* B3) Check if the high word is equal to the inf exponent */
  ahi_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);

  /* anan = B1[hi] or (B2[lo] and B3[hi]) */
  anan = (vec_uint4)vec_or(a_gt, vec_and((vec_uint4)vec_slo((vec_uchar16)a_gt, x.v), ahi_inf));

  /* result = A and not B */
  result = vec_andc(biteq, anan);

  /* Promote high words to 64 bits and return */
  return ((qword)(vec_perm((vec_uchar16)result, (vec_uchar16)result, hihi_promote)));
}

/* Compare absolute greater than
 */
static __inline qword si_fcmgt(qword a, qword b)
{
  vec_float4 msb = (vec_float4)((vec_uint4){0x80000000, 0x80000000, 0x80000000, 0x80000000});

  return ((qword)(vec_cmpgt(vec_andc((vec_float4)(a), msb),
                            vec_andc((vec_float4)(b), msb))));
}

static __inline qword si_dfcmgt(qword a, qword b)
{
  vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
  vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
  vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };

  union {
    vec_uchar16 v;
    int i[4];
  } x;

  /* Shift 4 bytes */
  x.i[3] = 4 << 3;

  // absolute value of a,b
  vec_uint4 aabs = vec_and((vec_uint4)a, sign_mask);
  vec_uint4 babs = vec_and((vec_uint4)b, sign_mask);

  // check if a is nan
  vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);
  vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
  a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan, x.v), a_inf));
  a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);

  // check if b is nan
  vec_uint4 b_inf = (vec_uint4)vec_cmpeq(babs, nan_mask);
  vec_uint4 b_nan = (vec_uint4)vec_cmpgt(babs, nan_mask);
  b_nan = vec_or(b_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)b_nan, x.v), b_inf));
  b_nan = (vec_uint4)vec_perm((vec_uchar16)b_nan, (vec_uchar16)b_nan, splat_hi);

  // A) Check if the exponents are different
  vec_uint4 gt_hi = (vec_uint4)vec_cmpgt(aabs, babs);

  // B) Check if high word equal, and low word greater
  vec_uint4 gt_lo = (vec_uint4)vec_cmpgt((vec_uint4)aabs, (vec_uint4)babs);
  vec_uint4 eq = (vec_uint4)vec_cmpeq(aabs, babs);
  vec_uint4 eqgt = vec_and(eq, vec_slo(gt_lo, x.v));

  // If either A or B is true, return true (unless NaNs detected)
  vec_uint4 r = vec_or(gt_hi, eqgt);

  // splat the high words of the comparison step
  r = (vec_uint4)vec_perm((vec_uchar16)r, (vec_uchar16)r, splat_hi);

  // correct for NaNs in input
  return ((qword)vec_andc(r, vec_or(a_nan, b_nan)));
}

/* Compare equal
 */
static __inline qword si_ceqb(qword a, qword b)
{
  return ((qword)(vec_cmpeq((vec_uchar16)(a), (vec_uchar16)(b))));
}

static __inline qword si_ceqh(qword a, qword b)
{
  return ((qword)(vec_cmpeq((vec_ushort8)(a), (vec_ushort8)(b))));
}

static __inline qword si_ceq(qword a, qword b)
{
  return ((qword)(vec_cmpeq((vec_uint4)(a), (vec_uint4)(b))));
}

static __inline qword si_fceq(qword a, qword b)
{
  return ((qword)(vec_cmpeq((vec_float4)(a), (vec_float4)(b))));
}

static __inline qword si_ceqbi(qword a, signed char b)
{
  return ((qword)(vec_cmpeq((vec_char16)(a),
                            vec_splat((vec_char16)(si_from_char(b)), 3))));
}

static __inline qword si_ceqhi(qword a, signed short b)
{
  return ((qword)(vec_cmpeq((vec_short8)(a),
                            vec_splat((vec_short8)(si_from_short(b)), 1))));
}

static __inline qword si_ceqi(qword a, signed int b)
{
  return ((qword)(vec_cmpeq((vec_int4)(a),
                            vec_splat((vec_int4)(si_from_int(b)), 0))));
}

static __inline qword si_dfceq(qword a, qword b)
{
  vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
  vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000 };
  vec_uchar16 hihi_promote = (vec_uchar16) { 0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27};

  vec_uint4 biteq;
  vec_uint4 aabs;
  vec_uint4 babs;
  vec_uint4 a_gt;
  vec_uint4 ahi_inf;
  vec_uint4 anan;
  vec_uint4 iszero;
  vec_uint4 result;

  union {
    vec_uchar16 v;
    int i[4];
  } x;

  /* Shift 4 bytes */
  x.i[3] = 4 << 3;

  /* A) Check for bit equality, store in high word */
  biteq = (vec_uint4) vec_cmpeq((vec_uint4)a, (vec_uint4)b);
  biteq = vec_and(biteq, (vec_uint4)vec_slo((vec_uchar16)biteq, x.v));

  /* Mask out sign bits */
  aabs = vec_and((vec_uint4)a, sign_mask);
  babs = vec_and((vec_uint4)b, sign_mask);

  /*
     B) Check if a is NaN, store in high word
        B1) If the high word is greater than max_exp (indicates a NaN)
        B2) If the low word is greater than 0
  */
  a_gt = (vec_uint4)vec_cmpgt(aabs, nan_mask);

  /* B3) Check if the high word is equal to the inf exponent */
  ahi_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);

  /* anan = B1[hi] or (B2[lo] and B3[hi]) */
  anan = (vec_uint4)vec_or(a_gt, vec_and((vec_uint4)vec_slo((vec_uchar16)a_gt, x.v), ahi_inf));

  /* C) Check for 0 = -0 special case */
  iszero = (vec_uint4)vec_cmpeq((vec_uint4)vec_or(aabs, babs), (vec_uint4)vec_splat_u32(0));
  iszero = vec_and(iszero, (vec_uint4)vec_slo((vec_uchar16)iszero, x.v));

  /* result = (A or C) and not B */
  result = vec_or(biteq, iszero);
  result = vec_andc(result, anan);

  /* Promote high words to 64 bits and return */
  return ((qword)(vec_perm((vec_uchar16)result, (vec_uchar16)result, hihi_promote)));
}

/* Compare greater than
 */
static __inline qword si_cgtb(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_char16)(a), (vec_char16)(b))));
}

static __inline qword si_cgth(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_short8)(a), (vec_short8)(b))));
}

static __inline qword si_cgt(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_int4)(a), (vec_int4)(b))));
}

static __inline qword si_clgtb(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_uchar16)(a), (vec_uchar16)(b))));
}

static __inline qword si_clgth(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_ushort8)(a), (vec_ushort8)(b))));
}

static __inline qword si_clgt(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_uint4)(a), (vec_uint4)(b))));
}

static __inline qword si_fcgt(qword a, qword b)
{
  return ((qword)(vec_cmpgt((vec_float4)(a), (vec_float4)(b))));
}

static __inline qword si_dfcgt(qword a, qword b)
{
  vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
  vec_uchar16 borrow_shuffle = (vec_uchar16) { 4,5,6,7, 192,192,192,192, 12,13,14,15, 192,192,192,192 };
  vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
  vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };

  union {
    vec_uchar16 v;
    int i[4];
  } x;

  /* Shift 4 bytes */
  x.i[3] = 4 << 3;

  // absolute value of a,b
  vec_uint4 aabs = vec_and((vec_uint4)a, sign_mask);
  vec_uint4 babs = vec_and((vec_uint4)b, sign_mask);

  // check if a is nan
  vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);
  vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
  a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan, x.v), a_inf));
  a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);

  // check if b is nan
  vec_uint4 b_inf = (vec_uint4)vec_cmpeq(babs, nan_mask);
  vec_uint4 b_nan = (vec_uint4)vec_cmpgt(babs, nan_mask);
  b_nan = vec_or(b_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)b_nan, x.v), b_inf));
  b_nan = (vec_uint4)vec_perm((vec_uchar16)b_nan, (vec_uchar16)b_nan, splat_hi);

  // sign of a
  vec_uint4 asel = (vec_uint4)vec_sra((vec_int4)(a), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
  asel = (vec_uint4)vec_perm((vec_uchar16)asel, (vec_uchar16)asel, splat_hi);

  // sign of b
  vec_uint4 bsel = (vec_uint4)vec_sra((vec_int4)(b), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
  bsel = (vec_uint4)vec_perm((vec_uchar16)bsel, (vec_uchar16)bsel, splat_hi);

  // negative a
  vec_uint4 abor = vec_subc((vec_uint4)vec_splat_u32(0), aabs);
  vec_uchar16 pat = vec_sel(((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
                            vec_sr(borrow_shuffle, vec_splat_u8(3)),
                            vec_sra(borrow_shuffle, vec_splat_u8(7)));
  abor = (vec_uint4)(vec_perm(vec_perm((vec_uchar16)abor, (vec_uchar16)abor, borrow_shuffle),
                              ((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0,
                                             0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),
                              pat));
  vec_uint4 aneg = vec_add(vec_add(vec_splat_u32(0), vec_nor(aabs, aabs)), vec_and(abor, vec_splat_u32(1)));

  // pick the one we want
  vec_int4 aval = (vec_int4)vec_sel((vec_uchar16)aabs, (vec_uchar16)aneg, (vec_uchar16)asel);

  // negative b
  vec_uint4 bbor = vec_subc((vec_uint4)vec_splat_u32(0), babs);
  bbor = (vec_uint4)(vec_perm(vec_perm((vec_uchar16)bbor, (vec_uchar16)bbor, borrow_shuffle),
                              ((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0,
                                             0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),
                              pat));
  vec_uint4 bneg = vec_add(vec_nor(babs, babs), vec_and(bbor, vec_splat_u32(1)));

  // pick the one we want
  vec_int4 bval = (vec_int4)vec_sel((vec_uchar16)babs, (vec_uchar16)bneg, (vec_uchar16)bsel);

  // A) Check if the exponents are different
  vec_uint4 gt_hi = (vec_uint4)vec_cmpgt(aval, bval);

  // B) Check if high word equal, and low word greater
  vec_uint4 gt_lo = (vec_uint4)vec_cmpgt((vec_uint4)aval, (vec_uint4)bval);
  vec_uint4 eq = (vec_uint4)vec_cmpeq(aval, bval);
  vec_uint4 eqgt = vec_and(eq, vec_slo(gt_lo, x.v));

  // If either A or B is true, return true (unless NaNs detected)
  vec_uint4 r = vec_or(gt_hi, eqgt);

  // splat the high words of the comparison step
  r = (vec_uint4)vec_perm((vec_uchar16)r, (vec_uchar16)r, splat_hi);

  // correct for NaNs in input
  return ((qword)vec_andc(r, vec_or(a_nan, b_nan)));
}

static __inline qword si_cgtbi(qword a, signed char b)
{
  return ((qword)(vec_cmpgt((vec_char16)(a),
                            vec_splat((vec_char16)(si_from_char(b)), 3))));
}

static __inline qword si_cgthi(qword a, signed short b)
{
  return ((qword)(vec_cmpgt((vec_short8)(a),
                            vec_splat((vec_short8)(si_from_short(b)), 1))));
}

static __inline qword si_cgti(qword a, signed int b)
{
  return ((qword)(vec_cmpgt((vec_int4)(a),
                            vec_splat((vec_int4)(si_from_int(b)), 0))));
}

static __inline qword si_clgtbi(qword a, unsigned char b)
{
  return ((qword)(vec_cmpgt((vec_uchar16)(a),
                            vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
}

static __inline qword si_clgthi(qword a, unsigned short b)
{
  return ((qword)(vec_cmpgt((vec_ushort8)(a),
                            vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
}

static __inline qword si_clgti(qword a, unsigned int b)
{
  return ((qword)(vec_cmpgt((vec_uint4)(a),
                            vec_splat((vec_uint4)(si_from_uint(b)), 0))));
}

static __inline qword si_dftsv(qword a, char b)
{
  vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
  vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
  vec_uint4 result = (vec_uint4){0};
  vec_uint4 sign = (vec_uint4)vec_sra((vec_int4)(a), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
  sign = (vec_uint4)vec_perm((vec_uchar16)sign, (vec_uchar16)sign, splat_hi);
  vec_uint4 aabs = vec_and((vec_uint4)a, sign_mask);

  union {
    vec_uchar16 v;
    int i[4];
  } x;

  /* Shift 4 bytes */
  x.i[3] = 4 << 3;

  /* NaN or +inf or -inf  */
  if (b & 0x70)
    {
      vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
      vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);

      /* NaN  */
      if (b & 0x40)
        {
          vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
          a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan, x.v), a_inf));
          a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);
          result = vec_or(result, a_nan);
        }

      /* inf  */
      if (b & 0x30)
        {
          a_inf = vec_and((vec_uint4)vec_slo((vec_uchar16)a_inf, x.v), a_inf);
          a_inf = (vec_uint4)vec_perm((vec_uchar16)a_inf, (vec_uchar16)a_inf, splat_hi);

          /* +inf  */
          if (b & 0x20)
            result = vec_or(vec_andc(a_inf, sign), result);

          /* -inf  */
          if (b & 0x10)
            result = vec_or(vec_and(a_inf, sign), result);
        }
    }

  /* 0 or denorm  */
  if (b & 0xF)
    {
      vec_uint4 iszero = (vec_uint4)vec_cmpeq(aabs, (vec_uint4)vec_splat_u32(0));
      iszero = vec_and(iszero, (vec_uint4)vec_slo((vec_uchar16)iszero, x.v));

      /* denorm  */
      if (b & 0x3)
        {
          vec_uint4 denorm_mask = (vec_uint4){0xFFFFF, 0xFFFFF, 0xFFFFF, 0xFFFFF};
          vec_uint4 isdenorm = vec_nor((vec_uint4)vec_cmpgt(aabs, denorm_mask), iszero);
          isdenorm = (vec_uint4)vec_perm((vec_uchar16)isdenorm, (vec_uchar16)isdenorm, splat_hi);

          /* +denorm  */
          if (b & 0x2)
            result = vec_or(vec_andc(isdenorm, sign), result);

          /* -denorm  */
          if (b & 0x1)
            result = vec_or(vec_and(isdenorm, sign), result);
        }

      /* 0  */
      if (b & 0xC)
        {
          iszero = (vec_uint4)vec_perm((vec_uchar16)iszero, (vec_uchar16)iszero, splat_hi);

          /* +0  */
          if (b & 0x8)
            result = vec_or(vec_andc(iszero, sign), result);

          /* -0  */
          if (b & 0x4)
            result = vec_or(vec_and(iszero, sign), result);
        }
    }
  return ((qword)result);
}

/* Carry generate
 */
#define si_cg(_a, _b) ((qword)(vec_addc((vec_uint4)(_a), (vec_uint4)(_b))))

#define si_cgx(_a, _b, _c) ((qword)(vec_or(vec_addc((vec_uint4)(_a), (vec_uint4)(_b)), \
                                           vec_addc(vec_add((vec_uint4)(_a), (vec_uint4)(_b)), \
                                                    vec_and((vec_uint4)(_c), vec_splat_u32(1))))))

/* Count ones for bytes
 */
static __inline qword si_cntb(qword a)
{
  vec_uchar16 nib_cnt = (vec_uchar16){0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
  vec_uchar16 four = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 };
  vec_uchar16 av;

  av = (vec_uchar16)(a);
  return ((qword)(vec_add(vec_perm(nib_cnt, nib_cnt, av),
                          vec_perm(nib_cnt, nib_cnt, vec_sr(av, four)))));
}

/* Count leading zeros
 */
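/* The per-word count is built by table lookup on nibbles: a byte adds its
 * low-nibble count only when its high nibble is all zeros (cnt_hi == 4),
 * and the cnt == 8/16/24 tests cascade the per-byte counts across each
 * word.  Each word's total collects in its most-significant byte, which
 * the final shift right by 24 moves into place.
 */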
static __inline qword si_clz(qword a)
{
  vec_uchar16 av;
  vec_uchar16 cnt_hi, cnt_lo, cnt, tmp1, tmp2, tmp3;
  vec_uchar16 four = vec_splat_u8(4);
  vec_uchar16 nib_cnt = (vec_uchar16){4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
  vec_uchar16 eight = vec_splat_u8(8);
  vec_uchar16 sixteen = (vec_uchar16){16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16};
  vec_uchar16 twentyfour = (vec_uchar16){24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24};

  av = (vec_uchar16)(a);

  cnt_hi = vec_perm(nib_cnt, nib_cnt, vec_sr(av, four));
  cnt_lo = vec_perm(nib_cnt, nib_cnt, av);

  cnt = vec_add(cnt_hi, vec_and(cnt_lo, vec_cmpeq(cnt_hi, four)));

  tmp1 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(eight));
  tmp2 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(sixteen));
  tmp3 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(twentyfour));

  cnt = vec_add(cnt, vec_and(tmp1, vec_cmpeq(cnt, eight)));
  cnt = vec_add(cnt, vec_and(tmp2, vec_cmpeq(cnt, sixteen)));
  cnt = vec_add(cnt, vec_and(tmp3, vec_cmpeq(cnt, twentyfour)));

  return (qword)((vec_sr((vec_uint4)(cnt), (vec_uint4)(twentyfour))));
}

/* Convert to float
 */
#define si_cuflt(_a, _b) ((qword)(vec_ctf((vec_uint4)(_a), _b)))
#define si_csflt(_a, _b) ((qword)(vec_ctf((vec_int4)(_a), _b)))

/* Convert to signed int
 */
#define si_cflts(_a, _b) ((qword)(vec_cts((vec_float4)(_a), _b)))

/* Convert to unsigned int
 */
#define si_cfltu(_a, _b) ((qword)(vec_ctu((vec_float4)(_a), _b)))

/* Synchronize
 */
#define si_dsync() /* do nothing */
#define si_sync()  /* do nothing */
#define si_syncc() /* do nothing */

/* Equivalence
 */
static __inline qword si_eqv(qword a, qword b)
{
  vec_uchar16 d;

  d = vec_xor((vec_uchar16)(a), (vec_uchar16)(b));
  return ((qword)(vec_nor(d, d)));
}

/* Extend
 */
static __inline qword si_xsbh(qword a)
{
  vec_char16 av;

  av = (vec_char16)(a);
  return ((qword)(vec_unpackh(vec_perm(av, av, ((vec_uchar16){1, 3, 5, 7, 9, 11, 13, 15,
                                                              0, 0, 0, 0, 0, 0, 0, 0})))));
}

static __inline qword si_xshw(qword a)
{
  vec_short8 av;

  av = (vec_short8)(a);
  return ((qword)(vec_unpackh(vec_perm(av, av, ((vec_uchar16){2, 3, 6, 7,
                                                              10, 11, 14, 15,
                                                              0, 0, 0, 0,
                                                              0, 0, 0, 0})))));
}

static __inline qword si_xswd(qword a)
{
  vec_int4 av;

  av = (vec_int4)(a);
  return ((qword)(vec_perm(av, vec_sra(av, ((vec_uint4){31,31,31,31})),
                           ((vec_uchar16){20, 21, 22, 23,
                                          4, 5, 6, 7,
                                          28, 29, 30, 31,
                                          12, 13, 14, 15}))));
}

static __inline qword si_fesd(qword a)
{
  union {
    double d[2];
    vec_double2 vd;
  } out;
  union {
    float f[4];
    vec_float4 vf;
  } in;

  in.vf = (vec_float4)(a);
  out.d[0] = (double)(in.f[0]);
  out.d[1] = (double)(in.f[2]);
  return ((qword)(out.vd));
}

/* Gather
 */
static __inline qword si_gbb(qword a)
{
  vec_uchar16 bits;
  vec_uint4 bytes;

  bits = vec_sl(vec_and((vec_uchar16)(a), vec_splat_u8(1)), ((vec_uchar16){7, 6, 5, 4, 3, 2, 1, 0,
                                                                           7, 6, 5, 4, 3, 2, 1, 0}));
  bytes = (vec_uint4)vec_sum2s((vec_int4)(vec_sum4s(bits, ((vec_uint4){0}))), ((vec_int4){0}));

  return ((qword)(vec_perm(bytes, bytes, ((vec_uchar16){0, 0, 7, 15, 0, 0, 0, 0,
                                                        0, 0, 0, 0, 0, 0, 0, 0}))));
}

static __inline qword si_gbh(qword a)
{
  vec_ushort8 bits;
  vec_uint4 bytes;

  bits = vec_sl(vec_and((vec_ushort8)(a), vec_splat_u16(1)), ((vec_ushort8){7, 6, 5, 4, 3, 2, 1, 0}));

  bytes = (vec_uint4)vec_sums((vec_int4)(vec_sum4s((vec_short8)(bits), (vec_int4){0})), (vec_int4){0});

  return ((qword)(vec_sld(bytes, bytes, 12)));
}

static __inline qword si_gb(qword a)
{
  vec_uint4 bits;
  vec_uint4 bytes;

  bits = vec_sl(vec_and((vec_uint4)(a), vec_splat_u32(1)), ((vec_uint4){3, 2, 1, 0}));
  bytes = (vec_uint4)vec_sums((vec_int4)(bits), ((vec_int4){0}));
  return ((qword)(vec_sld(bytes, bytes, 12)));
}

/* Compare and halt
 */
static __inline void si_heq(qword a, qword b)
{
  union {
    vector unsigned int v;
    unsigned int i[4];
  } aa, bb;

  aa.v = (vector unsigned int)(a);
  bb.v = (vector unsigned int)(b);

  if (aa.i[0] == bb.i[0]) { SPU_HALT_ACTION; };
}

static __inline void si_heqi(qword a, unsigned int b)
{
  union {
    vector unsigned int v;
    unsigned int i[4];
  } aa;

  aa.v = (vector unsigned int)(a);

  if (aa.i[0] == b) { SPU_HALT_ACTION; };
}

static __inline void si_hgt(qword a, qword b)
{
  union {
    vector signed int v;
    signed int i[4];
  } aa, bb;

  aa.v = (vector signed int)(a);
  bb.v = (vector signed int)(b);

  if (aa.i[0] > bb.i[0]) { SPU_HALT_ACTION; };
}

static __inline void si_hgti(qword a, signed int b)
{
  union {
    vector signed int v;
    signed int i[4];
  } aa;

  aa.v = (vector signed int)(a);

  if (aa.i[0] > b) { SPU_HALT_ACTION; };
}

static __inline void si_hlgt(qword a, qword b)
{
  union {
    vector unsigned int v;
    unsigned int i[4];
  } aa, bb;

  aa.v = (vector unsigned int)(a);
  bb.v = (vector unsigned int)(b);

  if (aa.i[0] > bb.i[0]) { SPU_HALT_ACTION; };
}

static __inline void si_hlgti(qword a, unsigned int b)
{
  union {
    vector unsigned int v;
    unsigned int i[4];
  } aa;

  aa.v = (vector unsigned int)(a);

  if (aa.i[0] > b) { SPU_HALT_ACTION; };
}

/* Multiply and Add
 */
static __inline qword si_mpya(qword a, qword b, qword c)
{
  return ((qword)(vec_msum(vec_and((vec_short8)(a),
                                   ((vec_short8){0, -1, 0, -1, 0, -1, 0, -1})),
                           (vec_short8)(b), (vec_int4)(c))));
}

static __inline qword si_fma(qword a, qword b, qword c)
{
  return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b), (vec_float4)(c))));
}

static __inline qword si_dfma(qword a, qword b, qword c)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, cc, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  cc.v = (vec_double2)(c);
  dd.d[0] = aa.d[0] * bb.d[0] + cc.d[0];
  dd.d[1] = aa.d[1] * bb.d[1] + cc.d[1];
  return ((qword)(dd.v));
}

/* Form Mask
 */
#define si_fsmbi(_a) si_fsmb(si_from_int(_a))

static __inline qword si_fsmb(qword a)
{
  vec_char16 mask;
  vec_ushort8 in;

  in = (vec_ushort8)(a);
  mask = (vec_char16)(vec_perm(in, in, ((vec_uchar16){2, 2, 2, 2, 2, 2, 2, 2,
                                                      3, 3, 3, 3, 3, 3, 3, 3})));
  return ((qword)(vec_sra(vec_sl(mask, ((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7,
                                                      0, 1, 2, 3, 4, 5, 6, 7})),
                          vec_splat_u8(7))));
}

static __inline qword si_fsmh(qword a)
{
  vec_uchar16 in;
  vec_short8 mask;

  in = (vec_uchar16)(a);
  mask = (vec_short8)(vec_splat(in, 3));
  return ((qword)(vec_sra(vec_sl(mask, ((vec_ushort8){0, 1, 2, 3, 4, 5, 6, 7})),
                          vec_splat_u16(15))));
}

static __inline qword si_fsm(qword a)
{
  vec_uchar16 in;
  vec_int4 mask;

  in = (vec_uchar16)(a);
  mask = (vec_int4)(vec_splat(in, 3));
  return ((qword)(vec_sra(vec_sl(mask, ((vec_uint4){28, 29, 30, 31})),
                          ((vec_uint4){31,31,31,31}))));
}

/* Move from/to registers
 */
#define si_fscrrd() ((qword)((vec_uint4){0}))
#define si_fscrwr(_a)

#define si_mfspr(_reg) ((qword)((vec_uint4){0}))
#define si_mtspr(_reg, _a)

/* Multiply High High Add
 */
static __inline qword si_mpyhha(qword a, qword b, qword c)
{
  return ((qword)(vec_add(vec_mule((vec_short8)(a), (vec_short8)(b)), (vec_int4)(c))));
}

static __inline qword si_mpyhhau(qword a, qword b, qword c)
{
  return ((qword)(vec_add(vec_mule((vec_ushort8)(a), (vec_ushort8)(b)), (vec_uint4)(c))));
}

/* Multiply Subtract
 */
static __inline qword si_fms(qword a, qword b, qword c)
{
  return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b),
                           vec_sub(((vec_float4){0.0f}), (vec_float4)(c)))));
}

static __inline qword si_dfms(qword a, qword b, qword c)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, cc, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  cc.v = (vec_double2)(c);
  dd.d[0] = aa.d[0] * bb.d[0] - cc.d[0];
  dd.d[1] = aa.d[1] * bb.d[1] - cc.d[1];
  return ((qword)(dd.v));
}

/* Multiply
 */
static __inline qword si_fm(qword a, qword b)
{
  return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b), ((vec_float4){0.0f}))));
}

static __inline qword si_dfm(qword a, qword b)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  dd.d[0] = aa.d[0] * bb.d[0];
  dd.d[1] = aa.d[1] * bb.d[1];
  return ((qword)(dd.v));
}

/* Multiply High
 */
static __inline qword si_mpyh(qword a, qword b)
{
  vec_uint4 sixteen = (vec_uint4){16, 16, 16, 16};

  return ((qword)(vec_sl(vec_mule((vec_short8)(a), (vec_short8)(vec_sl((vec_uint4)(b), sixteen))), sixteen)));
}

/* Multiply High High
 */
static __inline qword si_mpyhh(qword a, qword b)
{
  return ((qword)(vec_mule((vec_short8)(a), (vec_short8)(b))));
}

static __inline qword si_mpyhhu(qword a, qword b)
{
  return ((qword)(vec_mule((vec_ushort8)(a), (vec_ushort8)(b))));
}

/* Multiply Odd
 */
static __inline qword si_mpy(qword a, qword b)
{
  return ((qword)(vec_mulo((vec_short8)(a), (vec_short8)(b))));
}

static __inline qword si_mpyu(qword a, qword b)
{
  return ((qword)(vec_mulo((vec_ushort8)(a), (vec_ushort8)(b))));
}

static __inline qword si_mpyi(qword a, short b)
{
  return ((qword)(vec_mulo((vec_short8)(a),
                           vec_splat((vec_short8)(si_from_short(b)), 1))));
}

static __inline qword si_mpyui(qword a, unsigned short b)
{
  return ((qword)(vec_mulo((vec_ushort8)(a),
                           vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
}

/* Multiply and Shift Right
 */
static __inline qword si_mpys(qword a, qword b)
{
  return ((qword)(vec_sra(vec_mulo((vec_short8)(a), (vec_short8)(b)), ((vec_uint4){16,16,16,16}))));
}

/* Nand
 */
static __inline qword si_nand(qword a, qword b)
{
  vec_uchar16 d;

  d = vec_and((vec_uchar16)(a), (vec_uchar16)(b));
  return ((qword)(vec_nor(d, d)));
}

/* Negative Multiply Add
 */
static __inline qword si_dfnma(qword a, qword b, qword c)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, cc, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  cc.v = (vec_double2)(c);
  dd.d[0] = -cc.d[0] - aa.d[0] * bb.d[0];
  dd.d[1] = -cc.d[1] - aa.d[1] * bb.d[1];
  return ((qword)(dd.v));
}

/* Negative Multiply and Subtract
 */
static __inline qword si_fnms(qword a, qword b, qword c)
{
  return ((qword)(vec_nmsub((vec_float4)(a), (vec_float4)(b), (vec_float4)(c))));
}

static __inline qword si_dfnms(qword a, qword b, qword c)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, cc, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  cc.v = (vec_double2)(c);
  dd.d[0] = cc.d[0] - aa.d[0] * bb.d[0];
  dd.d[1] = cc.d[1] - aa.d[1] * bb.d[1];
  return ((qword)(dd.v));
}

/* Nor
 */
static __inline qword si_nor(qword a, qword b)
{
  return ((qword)(vec_nor((vec_uchar16)(a), (vec_uchar16)(b))));
}

/* Or
 */
static __inline qword si_or(qword a, qword b)
{
  return ((qword)(vec_or((vec_uchar16)(a), (vec_uchar16)(b))));
}

static __inline qword si_orbi(qword a, unsigned char b)
{
  return ((qword)(vec_or((vec_uchar16)(a),
                         vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
}

static __inline qword si_orhi(qword a, unsigned short b)
{
  return ((qword)(vec_or((vec_ushort8)(a),
                         vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
}

static __inline qword si_ori(qword a, unsigned int b)
{
  return ((qword)(vec_or((vec_uint4)(a),
                         vec_splat((vec_uint4)(si_from_uint(b)), 0))));
}

/* Or Complement
 */
static __inline qword si_orc(qword a, qword b)
{
  return ((qword)(vec_or((vec_uchar16)(a), vec_nor((vec_uchar16)(b), (vec_uchar16)(b)))));
}

/* Or Across
 */
static __inline qword si_orx(qword a)
{
  vec_uchar16 tmp;

  tmp = (vec_uchar16)(a);
  tmp = vec_or(tmp, vec_sld(tmp, tmp, 8));
  tmp = vec_or(tmp, vec_sld(tmp, tmp, 4));
  return ((qword)(vec_and(tmp, ((vec_uchar16){0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
                                              0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}))));
}

/* Estimates
 */
static __inline qword si_frest(qword a)
{
  return ((qword)(vec_re((vec_float4)(a))));
}

static __inline qword si_frsqest(qword a)
{
  return ((qword)(vec_rsqrte((vec_float4)(a))));
}

#define si_fi(_a, _d) (_d)

/* Channel Read and Write
 */
#define si_rdch(_channel)   ((qword)(vec_splat_u8(0))) /* not mappable */
#define si_rchcnt(_channel) ((qword)(vec_splat_u8(0))) /* not mappable */
#define si_wrch(_channel, _a) /* not mappable */

/* Rotate Left
 */
static __inline qword si_roth(qword a, qword b)
{
  return ((qword)(vec_rl((vec_ushort8)(a), (vec_ushort8)(b))));
}

static __inline qword si_rot(qword a, qword b)
{
  return ((qword)(vec_rl((vec_uint4)(a), (vec_uint4)(b))));
}

static __inline qword si_rothi(qword a, int b)
{
  return ((qword)(vec_rl((vec_ushort8)(a),
                         vec_splat((vec_ushort8)(si_from_int(b)), 1))));
}

static __inline qword si_roti(qword a, int b)
{
  return ((qword)(vec_rl((vec_uint4)(a),
                         vec_splat((vec_uint4)(si_from_int(b)), 0))));
}

/* Rotate Left with Mask
 */
static __inline qword si_rothm(qword a, qword b)
{
  vec_ushort8 neg_b;
  vec_ushort8 mask;

  neg_b = (vec_ushort8)vec_sub(vec_splat_s16(0), (vec_short8)(b));
  mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
  return ((qword)(vec_andc(vec_sr((vec_ushort8)(a), neg_b), mask)));
}

static __inline qword si_rotm(qword a, qword b)
{
  vec_uint4 neg_b;
  vec_uint4 mask;

  neg_b = (vec_uint4)vec_sub(vec_splat_s32(0), (vec_int4)(b));
  mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  return ((qword)(vec_andc(vec_sr((vec_uint4)(a), neg_b), mask)));
}

static __inline qword si_rothmi(qword a, int b)
{
  vec_ushort8 neg_b;
  vec_ushort8 mask;

  neg_b = vec_splat((vec_ushort8)(si_from_int(-b)), 1);
  mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
  return ((qword)(vec_andc(vec_sr((vec_ushort8)(a), neg_b), mask)));
}

static __inline qword si_rotmi(qword a, int b)
{
  vec_uint4 neg_b;
  vec_uint4 mask;

  neg_b = vec_splat((vec_uint4)(si_from_int(-b)), 0);
  mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  return ((qword)(vec_andc(vec_sr((vec_uint4)(a), neg_b), mask)));
}

/* Rotate Left Algebraic with Mask
 */
static __inline qword si_rotmah(qword a, qword b)
{
  vec_ushort8 neg_b;
  vec_ushort8 mask;

  neg_b = (vec_ushort8)vec_sub(vec_splat_s16(0), (vec_short8)(b));
  mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
  return ((qword)(vec_sra((vec_short8)(a), (vec_ushort8)vec_or(neg_b, mask))));
}

static __inline qword si_rotma(qword a, qword b)
{
  vec_uint4 neg_b;
  vec_uint4 mask;

  neg_b = (vec_uint4)vec_sub(vec_splat_s32(0), (vec_int4)(b));
  mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  return ((qword)(vec_sra((vec_int4)(a), (vec_uint4)vec_or(neg_b, mask))));
}

static __inline qword si_rotmahi(qword a, int b)
{
  vec_ushort8 neg_b;
  vec_ushort8 mask;

  neg_b = vec_splat((vec_ushort8)(si_from_int(-b)), 1);
  mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
  return ((qword)(vec_sra((vec_short8)(a), (vec_ushort8)vec_or(neg_b, mask))));
}

static __inline qword si_rotmai(qword a, int b)
{
  vec_uint4 neg_b;
  vec_uint4 mask;

  neg_b = vec_splat((vec_uint4)(si_from_int(-b)), 0);
  mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  return ((qword)(vec_sra((vec_int4)(a), (vec_uint4)vec_or(neg_b, mask))));
}

/* Rotate Left Quadword by Bytes with Mask
 */
static __inline qword si_rotqmbyi(qword a, int count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } x;
  vec_uchar16 mask;

  count = 0 - count;
  x.i[3] = count << 3;
  mask = (count & 0x10) ? vec_splat_u8(0) : vec_splat_u8(-1);

  return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
}

static __inline qword si_rotqmby(qword a, qword count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } x;
  int cnt;
  vec_uchar16 mask;

  x.v = (vec_uchar16)(count);
  x.i[0] = cnt = (0 - x.i[0]) << 3;

  x.v = vec_splat(x.v, 3);
  mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);

  return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
}

/* Rotate Left Quadword by Bytes
 */
static __inline qword si_rotqbyi(qword a, int count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } left, right;

  count <<= 3;
  left.i[3] = count;
  right.i[3] = 0 - count;
  return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left.v), vec_sro((vec_uchar16)(a), right.v))));
}

static __inline qword si_rotqby(qword a, qword count)
{
  vec_uchar16 left, right;

  left = vec_sl(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(3));
  right = vec_sub(vec_splat_u8(0), left);
  return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left), vec_sro((vec_uchar16)(a), right))));
}

/* Rotate Left Quadword by Bytes Bit Count
 */
static __inline qword si_rotqbybi(qword a, qword count)
{
  vec_uchar16 left, right;

  left = vec_splat((vec_uchar16)(count), 3);
  right = vec_sub(vec_splat_u8(7), left);
  return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left), vec_sro((vec_uchar16)(a), right))));
}

/* Rotate Left Quadword by Bits
 */
static __inline qword si_rotqbii(qword a, int count)
{
  vec_uchar16 x, y;
  vec_uchar16 result;

  x = vec_splat((vec_uchar16)(si_from_int(count & 7)), 3);
  y = (vec_uchar16)(vec_sr((vec_uint4)vec_sro((vec_uchar16)(a), ((vec_uchar16)((vec_uint4){0,0,0,120}))),
                           (vec_uint4)vec_sub(vec_splat_u8(8), x)));
  result = vec_or(vec_sll((qword)(a), x), y);
  return ((qword)(result));
}

static __inline qword si_rotqbi(qword a, qword count)
{
  vec_uchar16 x, y;
  vec_uchar16 result;

  x = vec_and(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(7));
  y = (vec_uchar16)(vec_sr((vec_uint4)vec_sro((vec_uchar16)(a), ((vec_uchar16)((vec_uint4){0,0,0,120}))),
                           (vec_uint4)vec_sub(vec_splat_u8(8), x)));
  result = vec_or(vec_sll((qword)(a), x), y);
  return ((qword)(result));
}

/* Rotate Left Quadword and Mask by Bits
 */
static __inline qword si_rotqmbii(qword a, int count)
{
  return ((qword)(vec_srl((vec_uchar16)(a), vec_splat((vec_uchar16)(si_from_int(0 - count)), 3))));
}

static __inline qword si_rotqmbi(qword a, qword count)
{
  return ((qword)(vec_srl((vec_uchar16)(a), vec_sub(vec_splat_u8(0), vec_splat((vec_uchar16)(count), 3)))));
}

/* Rotate Left Quadword and Mask by Bytes with Bit Count
 */
static __inline qword si_rotqmbybi(qword a, qword count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } x;
  int cnt;
  vec_uchar16 mask;

  x.v = (vec_uchar16)(count);
  x.i[0] = cnt = 0 - (x.i[0] & ~7);
  x.v = vec_splat(x.v, 3);
  mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);

  return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
}

/* Round Double to Float
 */
static __inline qword si_frds(qword a)
{
  union {
    vec_float4 v;
    float f[4];
  } d;
  union {
    vec_double2 v;
    double d[2];
  } in;

  in.v = (vec_double2)(a);
  d.v = (vec_float4){0.0f};
  d.f[0] = (float)in.d[0];
  d.f[2] = (float)in.d[1];

  return ((qword)(d.v));
}

/* Select Bits
 */
static __inline qword si_selb(qword a, qword b, qword c)
{
  return ((qword)(vec_sel((vec_uchar16)(a), (vec_uchar16)(b), (vec_uchar16)(c))));
}

/* Shuffle Bytes
 */
static __inline qword si_shufb(qword a, qword b, qword pattern)
{
  vec_uchar16 pat;

  pat = vec_sel(((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
                vec_sr((vec_uchar16)(pattern), vec_splat_u8(3)),
                vec_sra((vec_uchar16)(pattern), vec_splat_u8(7)));
  return ((qword)(vec_perm(vec_perm(a, b, pattern),
                           ((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0,
                                          0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),
                           pat)));
}
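
/* Note: the second vec_perm above emulates the SPU shufb special selector
 * codes; `pat' redirects any pattern byte with its most-significant bit set
 * into the constant vector, so selectors of the form 10xxxxxx produce 0x00,
 * 110xxxxx produce 0xFF, and 111xxxxx produce 0x80.
 */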
  1337. /* Shift Left
  1338. */
  1339. static __inline qword si_shlh(qword a, qword b)
  1340. {
  1341. vec_ushort8 mask;
  1342. mask = (vec_ushort8)vec_sra(vec_sl((vec_ushort8)(b), vec_splat_u16(11)), vec_splat_u16(15));
  1343. return ((qword)(vec_andc(vec_sl((vec_ushort8)(a), (vec_ushort8)(b)), mask)));
  1344. }
  1345. static __inline qword si_shl(qword a, qword b)
  1346. {
  1347. vec_uint4 mask;
  1348. mask = (vec_uint4)vec_sra(vec_sl((vec_uint4)(b), ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  1349. return ((qword)(vec_andc(vec_sl((vec_uint4)(a), (vec_uint4)(b)), mask)));
  1350. }

static __inline qword si_shlhi(qword a, unsigned int b)
{
  vec_ushort8 mask;
  vec_ushort8 bv;

  bv = vec_splat((vec_ushort8)(si_from_int(b)), 1);
  mask = (vec_ushort8)vec_sra(vec_sl(bv, vec_splat_u16(11)), vec_splat_u16(15));
  return ((qword)(vec_andc(vec_sl((vec_ushort8)(a), bv), mask)));
}

static __inline qword si_shli(qword a, unsigned int b)
{
  vec_uint4 bv;
  vec_uint4 mask;

  bv = vec_splat((vec_uint4)(si_from_uint(b)), 0);
  mask = (vec_uint4)vec_sra(vec_sl(bv, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
  return ((qword)(vec_andc(vec_sl((vec_uint4)(a), bv), mask)));
}
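
/* For example, si_shli(x, 40) yields zero in every word, as on the SPU,
 * whereas a bare vec_sl() would have shifted by 40 % 32 = 8 bits.
 */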

/* Shift Left Quadword
 */
static __inline qword si_shlqbii(qword a, unsigned int count)
{
  vec_uchar16 x;

  x = vec_splat((vec_uchar16)(si_from_uint(count)), 3);
  return ((qword)(vec_sll((vec_uchar16)(a), x)));
}

static __inline qword si_shlqbi(qword a, qword count)
{
  vec_uchar16 x;

  x = vec_splat((vec_uchar16)(count), 3);
  return ((qword)(vec_sll((vec_uchar16)(a), x)));
}
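
/* vec_sll() shifts the entire 128-bit register but requires every byte
 * of its shift operand to hold the same value, hence the vec_splat() of
 * byte 3 (the low byte of the count's preferred word slot).  Only the
 * low three bits of that byte are honoured, giving shifts of 0..7 bits.
 */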

/* Shift Left Quadword by Bytes
 */
static __inline qword si_shlqbyi(qword a, unsigned int count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } x;
  vec_uchar16 mask;

  x.i[3] = count << 3;
  mask = (count & 0x10) ? vec_splat_u8(0) : vec_splat_u8(-1);
  return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
}
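
/* Only x.i[3] is initialized because vec_slo() reads just bits 121:124
 * of its shift operand (the byte count lives in the last byte of the
 * vector); the remaining union bytes are don't-cares.  Byte shifts of
 * 16 or more (count & 0x10) zero the result, as on the SPU.
 */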

static __inline qword si_shlqby(qword a, qword count)
{
  union {
    vec_uchar16 v;
    unsigned int i[4];
  } x;
  unsigned int cnt;
  vec_uchar16 mask;

  x.v = vec_sl(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(3));
  cnt = x.i[0];
  mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
  return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
}
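
/* The byte count is converted to a bit count (<< 3) so that vec_slo()
 * can consume it from bits 121:124.  After the shift the splatted count
 * byte holds 8 * (count & 31), so bit 0x80 is set exactly when the
 * effective byte count is 16..31, and the mask then zeroes the result.
 */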

/* Shift Left Quadword by Bytes with Bit Count
 */
static __inline qword si_shlqbybi(qword a, qword count)
{
  union {
    vec_uchar16 v;
    int i[4];
  } x;
  unsigned int cnt;
  vec_uchar16 mask;

  x.v = vec_splat((vec_uchar16)(count), 3);
  cnt = x.i[0];
  mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
  return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
}

/* Stop and Signal
 */
#define si_stop(_type) SPU_STOP_ACTION
#define si_stopd(a, b, c) SPU_STOP_ACTION
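
/* VMX has no stop-and-signal equivalent, so these expand to
 * SPU_STOP_ACTION, which is presumably given a default (and can be
 * overridden by the includer) earlier in this header; the signal type
 * and the stopd operands are simply discarded.
 */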

/* Subtract
 */
static __inline qword si_sfh(qword a, qword b)
{
  return ((qword)(vec_sub((vec_ushort8)(b), (vec_ushort8)(a))));
}

static __inline qword si_sf(qword a, qword b)
{
  return ((qword)(vec_sub((vec_uint4)(b), (vec_uint4)(a))));
}

static __inline qword si_fs(qword a, qword b)
{
  return ((qword)(vec_sub((vec_float4)(a), (vec_float4)(b))));
}
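
/* Note the operand order: the integer forms implement the SPU
 * "subtract from" instructions (sf computes b - a), while the
 * floating-point fs subtracts conventionally (a - b).
 */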

static __inline qword si_dfs(qword a, qword b)
{
  union {
    vec_double2 v;
    double d[2];
  } aa, bb, dd;

  aa.v = (vec_double2)(a);
  bb.v = (vec_double2)(b);
  dd.d[0] = aa.d[0] - bb.d[0];
  dd.d[1] = aa.d[1] - bb.d[1];
  return ((qword)(dd.v));
}

static __inline qword si_sfhi(qword a, short b)
{
  return ((qword)(vec_sub(vec_splat((vec_short8)(si_from_short(b)), 1),
                          (vec_short8)(a))));
}

static __inline qword si_sfi(qword a, int b)
{
  return ((qword)(vec_sub(vec_splat((vec_int4)(si_from_int(b)), 0),
                          (vec_int4)(a))));
}

/* Subtract Word Extended
 */
#define si_sfx(_a, _b, _c) ((qword)(vec_add(vec_add((vec_uint4)(_b),                            \
                                                    vec_nor((vec_uint4)(_a), (vec_uint4)(_a))), \
                                            vec_and((vec_uint4)(_c), vec_splat_u32(1)))))
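
/* Subtract extended follows from two's complement arithmetic:
 * b - a - 1 + carry == b + ~a + carry, so the macro adds the one's
 * complement of _a (vec_nor of _a with itself) to _b plus the carry
 * bit taken from the low bit of each word of _c.
 */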

/* Sum Bytes into Shorts
 */
static __inline qword si_sumb(qword a, qword b)
{
  vec_uint4 zero = (vec_uint4){0};
  vec_ushort8 sum_a, sum_b;

  sum_a = (vec_ushort8)vec_sum4s((vec_uchar16)(a), zero);
  sum_b = (vec_ushort8)vec_sum4s((vec_uchar16)(b), zero);
  return ((qword)(vec_perm(sum_a, sum_b, ((vec_uchar16){18, 19,  2,  3, 22, 23,  6,  7,
                                                        26, 27, 10, 11, 30, 31, 14, 15}))));
}
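
/* vec_sum4s() adds each group of four bytes into a word.  The vec_perm()
 * then packs the low halfword of every word sum so that each result word
 * carries the byte sum of 'b' in its upper halfword and the byte sum of
 * 'a' in its lower halfword, the layout the SPU sumb instruction defines.
 */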

/* Exclusive OR
 */
static __inline qword si_xor(qword a, qword b)
{
  return ((qword)(vec_xor((vec_uchar16)(a), (vec_uchar16)(b))));
}

static __inline qword si_xorbi(qword a, unsigned char b)
{
  return ((qword)(vec_xor((vec_uchar16)(a),
                          vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
}

static __inline qword si_xorhi(qword a, unsigned short b)
{
  return ((qword)(vec_xor((vec_ushort8)(a),
                          vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
}

static __inline qword si_xori(qword a, unsigned int b)
{
  return ((qword)(vec_xor((vec_uint4)(a),
                          vec_splat((vec_uint4)(si_from_uint(b)), 0))));
}
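
/* The immediate forms all use the same idiom: si_from_*() places the
 * scalar in its preferred slot (byte element 3, halfword element 1 or
 * word element 0), and vec_splat() broadcasts that element before the
 * bitwise operation.
 */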

/* Generate Controls for Sub-Quadword Insertion
 */
static __inline qword si_cbd(qword a, int imm)
{
  union {
    vec_uint4 v;
    unsigned char c[16];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.c[(si_to_uint(a) + (unsigned int)(imm)) & 0xF] = 0x03;
  return ((qword)(shmask.v));
}

static __inline qword si_cdd(qword a, int imm)
{
  union {
    vec_uint4 v;
    unsigned long long ll[2];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.ll[((si_to_uint(a) + (unsigned int)(imm)) >> 3) & 0x1] = 0x0001020304050607ULL;
  return ((qword)(shmask.v));
}

static __inline qword si_chd(qword a, int imm)
{
  union {
    vec_uint4 v;
    unsigned short s[8];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.s[((si_to_uint(a) + (unsigned int)(imm)) >> 1) & 0x7] = 0x0203;
  return ((qword)(shmask.v));
}

static __inline qword si_cwd(qword a, int imm)
{
  union {
    vec_uint4 v;
    unsigned int i[4];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.i[((si_to_uint(a) + (unsigned int)(imm)) >> 2) & 0x3] = 0x00010203;
  return ((qword)(shmask.v));
}
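
/* Each generator starts from the identity pattern for the second shuffle
 * operand (0x10..0x1F) and overwrites the element addressed by a + imm
 * with indices into the first operand's preferred slot.  The result is a
 * shufb control that inserts a scalar into a quadword; for instance
 * (illustrative use, not part of this header):
 *
 *   qword ctl    = si_cwd(addr, 0);
 *   qword merged = si_shufb(new_word, old_data, ctl);
 *
 * places word 0 of 'new_word' into 'old_data' at the word slot the
 * address selects, leaving every other byte unchanged.
 */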

static __inline qword si_cbx(qword a, qword b)
{
  union {
    vec_uint4 v;
    unsigned char c[16];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.c[si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) & 0xF] = 0x03;
  return ((qword)(shmask.v));
}

static __inline qword si_cdx(qword a, qword b)
{
  union {
    vec_uint4 v;
    unsigned long long ll[2];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.ll[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 3) & 0x1] = 0x0001020304050607ULL;
  return ((qword)(shmask.v));
}

static __inline qword si_chx(qword a, qword b)
{
  union {
    vec_uint4 v;
    unsigned short s[8];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.s[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 1) & 0x7] = 0x0203;
  return ((qword)(shmask.v));
}

static __inline qword si_cwx(qword a, qword b)
{
  union {
    vec_uint4 v;
    unsigned int i[4];
  } shmask;

  shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
  shmask.i[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 2) & 0x3] = 0x00010203;
  return ((qword)(shmask.v));
}

/* Constant Formation
 */
static __inline qword si_il(signed short imm)
{
  return ((qword)(vec_splat((vec_int4)(si_from_int((signed int)(imm))), 0)));
}

static __inline qword si_ila(unsigned int imm)
{
  return ((qword)(vec_splat((vec_uint4)(si_from_uint(imm)), 0)));
}

static __inline qword si_ilh(signed short imm)
{
  return ((qword)(vec_splat((vec_short8)(si_from_short(imm)), 1)));
}

static __inline qword si_ilhu(signed short imm)
{
  return ((qword)(vec_splat((vec_uint4)(si_from_uint((unsigned int)(imm) << 16)), 0)));
}

static __inline qword si_iohl(qword a, unsigned short imm)
{
  return ((qword)(vec_or((vec_uint4)(a), vec_splat((vec_uint4)(si_from_uint((unsigned int)(imm))), 0))));
}
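
/* The usual SPU idiom for an arbitrary 32-bit constant pairs ilhu and
 * iohl; for instance, si_iohl(si_ilhu(0x1234), 0x5678) yields
 * 0x12345678 in every word.
 */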

/* No Operation
 */
#define si_lnop() /* do nothing */
#define si_nop() /* do nothing */

/* Memory Load and Store
 */
static __inline qword si_lqa(unsigned int imm)
{
  return ((qword)(vec_ld(0, (vector unsigned char *)(imm))));
}

static __inline qword si_lqd(qword a, unsigned int imm)
{
  return ((qword)(vec_ld(si_to_uint(a) & ~0xF, (vector unsigned char *)(imm))));
}

static __inline qword si_lqr(unsigned int imm)
{
  return ((qword)(vec_ld(0, (vector unsigned char *)(imm))));
}

static __inline qword si_lqx(qword a, qword b)
{
  return ((qword)(vec_ld(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))), (vector unsigned char *)(0))));
}

static __inline void si_stqa(qword a, unsigned int imm)
{
  vec_st((vec_uchar16)(a), 0, (vector unsigned char *)(imm));
}

static __inline void si_stqd(qword a, qword b, unsigned int imm)
{
  vec_st((vec_uchar16)(a), si_to_uint(b) & ~0xF, (vector unsigned char *)(imm));
}

static __inline void si_stqr(qword a, unsigned int imm)
{
  vec_st((vec_uchar16)(a), 0, (vector unsigned char *)(imm));
}

static __inline void si_stqx(qword a, qword b, qword c)
{
  vec_st((vec_uchar16)(a),
         si_to_uint((qword)(vec_add((vec_uint4)(b), (vec_uint4)(c)))),
         (vector unsigned char *)(0));
}
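
/* vec_ld() and vec_st() ignore the low four bits of the effective
 * address, so these forms inherit the 16-byte alignment behaviour of
 * the SPU quadword load/store instructions; the explicit & ~0xF masking
 * in the d-form variants is redundant but harmless.
 */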

#endif /* !__SPU__ */
#endif /* !_SI2VMX_H_ */