// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MSA common macros
//
// Author(s): Prashant Patil (prashant.patil@imgtec.com)
  13. #ifndef WEBP_DSP_MSA_MACRO_H_
  14. #define WEBP_DSP_MSA_MACRO_H_
  15. #include <stdint.h>
  16. #include <msa.h>
  17. #if defined(__clang__)
  18. #define CLANG_BUILD
  19. #endif
  20. #ifdef CLANG_BUILD
  21. #define ALPHAVAL (-1)
  22. #define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
  23. #define ADDVI_W(a, b) __msa_addvi_w((v4i32)a, b)
  24. #define SRAI_B(a, b) __msa_srai_b((v16i8)a, b)
  25. #define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
  26. #define SRAI_W(a, b) __msa_srai_w((v4i32)a, b)
  27. #define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
  28. #define SLLI_B(a, b) __msa_slli_b((v4i32)a, b)
  29. #define ANDI_B(a, b) __msa_andi_b((v16u8)a, b)
  30. #define ORI_B(a, b) __msa_ori_b((v16u8)a, b)
  31. #else
  32. #define ALPHAVAL (0xff)
  33. #define ADDVI_H(a, b) (a + b)
  34. #define ADDVI_W(a, b) (a + b)
  35. #define SRAI_B(a, b) (a >> b)
  36. #define SRAI_H(a, b) (a >> b)
  37. #define SRAI_W(a, b) (a >> b)
  38. #define SRLI_H(a, b) (a << b)
  39. #define SLLI_B(a, b) (a << b)
  40. #define ANDI_B(a, b) (a & b)
  41. #define ORI_B(a, b) (a | b)
  42. #endif
  43. #define LD_B(RTYPE, psrc) *((RTYPE*)(psrc))
  44. #define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
  45. #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
  46. #define LD_H(RTYPE, psrc) *((RTYPE*)(psrc))
  47. #define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
  48. #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
  49. #define LD_W(RTYPE, psrc) *((RTYPE*)(psrc))
  50. #define LD_UW(...) LD_W(v4u32, __VA_ARGS__)
  51. #define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
  52. #define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
  53. #define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
  54. #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
  55. #define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
  56. #define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
  57. #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
  58. #define ST_W(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
  59. #define ST_UW(...) ST_W(v4u32, __VA_ARGS__)
  60. #define ST_SW(...) ST_W(v4i32, __VA_ARGS__)
  61. #define MSA_LOAD_FUNC(TYPE, INSTR, FUNC_NAME) \
  62. static inline TYPE FUNC_NAME(const void* const psrc) { \
  63. const uint8_t* const psrc_m = (const uint8_t*)psrc; \
  64. TYPE val_m; \
  65. asm volatile ( \
  66. "" #INSTR " %[val_m], %[psrc_m] \n\t" \
  67. : [val_m] "=r" (val_m) \
  68. : [psrc_m] "m" (*psrc_m)); \
  69. return val_m; \
  70. }
  71. #define MSA_LOAD(psrc, FUNC_NAME) FUNC_NAME(psrc)
  72. #define MSA_STORE_FUNC(TYPE, INSTR, FUNC_NAME) \
  73. static inline void FUNC_NAME(TYPE val, void* const pdst) { \
  74. uint8_t* const pdst_m = (uint8_t*)pdst; \
  75. TYPE val_m = val; \
  76. asm volatile ( \
  77. " " #INSTR " %[val_m], %[pdst_m] \n\t" \
  78. : [pdst_m] "=m" (*pdst_m) \
  79. : [val_m] "r" (val_m)); \
  80. }
  81. #define MSA_STORE(val, pdst, FUNC_NAME) FUNC_NAME(val, pdst)
  82. #if (__mips_isa_rev >= 6)
  83. MSA_LOAD_FUNC(uint16_t, lh, msa_lh);
  84. #define LH(psrc) MSA_LOAD(psrc, msa_lh)
  85. MSA_LOAD_FUNC(uint32_t, lw, msa_lw);
  86. #define LW(psrc) MSA_LOAD(psrc, msa_lw)
  87. #if (__mips == 64)
  88. MSA_LOAD_FUNC(uint64_t, ld, msa_ld);
  89. #define LD(psrc) MSA_LOAD(psrc, msa_ld)
  90. #else // !(__mips == 64)
  91. #define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_lw)) << 32) | \
  92. MSA_LOAD(psrc, msa_lw))
  93. #endif // (__mips == 64)
  94. MSA_STORE_FUNC(uint16_t, sh, msa_sh);
  95. #define SH(val, pdst) MSA_STORE(val, pdst, msa_sh)
  96. MSA_STORE_FUNC(uint32_t, sw, msa_sw);
  97. #define SW(val, pdst) MSA_STORE(val, pdst, msa_sw)
  98. MSA_STORE_FUNC(uint64_t, sd, msa_sd);
  99. #define SD(val, pdst) MSA_STORE(val, pdst, msa_sd)
  100. #else // !(__mips_isa_rev >= 6)
  101. MSA_LOAD_FUNC(uint16_t, ulh, msa_ulh);
  102. #define LH(psrc) MSA_LOAD(psrc, msa_ulh)
  103. MSA_LOAD_FUNC(uint32_t, ulw, msa_ulw);
  104. #define LW(psrc) MSA_LOAD(psrc, msa_ulw)
  105. #if (__mips == 64)
  106. MSA_LOAD_FUNC(uint64_t, uld, msa_uld);
  107. #define LD(psrc) MSA_LOAD(psrc, msa_uld)
  108. #else // !(__mips == 64)
  109. #define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_ulw)) << 32) | \
  110. MSA_LOAD(psrc, msa_ulw))
  111. #endif // (__mips == 64)
  112. MSA_STORE_FUNC(uint16_t, ush, msa_ush);
  113. #define SH(val, pdst) MSA_STORE(val, pdst, msa_ush)
  114. MSA_STORE_FUNC(uint32_t, usw, msa_usw);
  115. #define SW(val, pdst) MSA_STORE(val, pdst, msa_usw)
  116. #define SD(val, pdst) do { \
  117. uint8_t* const pdst_sd_m = (uint8_t*)(pdst); \
  118. const uint32_t val0_m = (uint32_t)(val & 0x00000000FFFFFFFF); \
  119. const uint32_t val1_m = (uint32_t)((val >> 32) & 0x00000000FFFFFFFF); \
  120. SW(val0_m, pdst_sd_m); \
  121. SW(val1_m, pdst_sd_m + 4); \
  122. } while (0)
  123. #endif // (__mips_isa_rev >= 6)
  124. /* Description : Load 4 words with stride
  125. * Arguments : Inputs - psrc, stride
  126. * Outputs - out0, out1, out2, out3
  127. * Details : Load word in 'out0' from (psrc)
  128. * Load word in 'out1' from (psrc + stride)
  129. * Load word in 'out2' from (psrc + 2 * stride)
  130. * Load word in 'out3' from (psrc + 3 * stride)
  131. */
  132. #define LW4(psrc, stride, out0, out1, out2, out3) do { \
  133. const uint8_t* ptmp = (const uint8_t*)psrc; \
  134. out0 = LW(ptmp); \
  135. ptmp += stride; \
  136. out1 = LW(ptmp); \
  137. ptmp += stride; \
  138. out2 = LW(ptmp); \
  139. ptmp += stride; \
  140. out3 = LW(ptmp); \
  141. } while (0)
  142. /* Description : Store words with stride
  143. * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
  144. * Details : Store word from 'in0' to (pdst)
  145. * Store word from 'in1' to (pdst + stride)
  146. * Store word from 'in2' to (pdst + 2 * stride)
  147. * Store word from 'in3' to (pdst + 3 * stride)
  148. */
  149. #define SW4(in0, in1, in2, in3, pdst, stride) do { \
  150. uint8_t* ptmp = (uint8_t*)pdst; \
  151. SW(in0, ptmp); \
  152. ptmp += stride; \
  153. SW(in1, ptmp); \
  154. ptmp += stride; \
  155. SW(in2, ptmp); \
  156. ptmp += stride; \
  157. SW(in3, ptmp); \
  158. } while (0)
  159. #define SW3(in0, in1, in2, pdst, stride) do { \
  160. uint8_t* ptmp = (uint8_t*)pdst; \
  161. SW(in0, ptmp); \
  162. ptmp += stride; \
  163. SW(in1, ptmp); \
  164. ptmp += stride; \
  165. SW(in2, ptmp); \
  166. } while (0)
  167. #define SW2(in0, in1, pdst, stride) do { \
  168. uint8_t* ptmp = (uint8_t*)pdst; \
  169. SW(in0, ptmp); \
  170. ptmp += stride; \
  171. SW(in1, ptmp); \
  172. } while (0)
  173. /* Description : Store 4 double words with stride
  174. * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
  175. * Details : Store double word from 'in0' to (pdst)
  176. * Store double word from 'in1' to (pdst + stride)
  177. * Store double word from 'in2' to (pdst + 2 * stride)
  178. * Store double word from 'in3' to (pdst + 3 * stride)
  179. */
  180. #define SD4(in0, in1, in2, in3, pdst, stride) do { \
  181. uint8_t* ptmp = (uint8_t*)pdst; \
  182. SD(in0, ptmp); \
  183. ptmp += stride; \
  184. SD(in1, ptmp); \
  185. ptmp += stride; \
  186. SD(in2, ptmp); \
  187. ptmp += stride; \
  188. SD(in3, ptmp); \
  189. } while (0)
  190. /* Description : Load vectors with 16 byte elements with stride
  191. * Arguments : Inputs - psrc, stride
  192. * Outputs - out0, out1
  193. * Return Type - as per RTYPE
  194. * Details : Load 16 byte elements in 'out0' from (psrc)
  195. * Load 16 byte elements in 'out1' from (psrc + stride)
  196. */
  197. #define LD_B2(RTYPE, psrc, stride, out0, out1) do { \
  198. out0 = LD_B(RTYPE, psrc); \
  199. out1 = LD_B(RTYPE, psrc + stride); \
  200. } while (0)
  201. #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
  202. #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
  203. #define LD_B3(RTYPE, psrc, stride, out0, out1, out2) do { \
  204. LD_B2(RTYPE, psrc, stride, out0, out1); \
  205. out2 = LD_B(RTYPE, psrc + 2 * stride); \
  206. } while (0)
  207. #define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
  208. #define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
  209. #define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
  210. LD_B2(RTYPE, psrc, stride, out0, out1); \
  211. LD_B2(RTYPE, psrc + 2 * stride , stride, out2, out3); \
  212. } while (0)
  213. #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
  214. #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
  215. #define LD_B8(RTYPE, psrc, stride, \
  216. out0, out1, out2, out3, out4, out5, out6, out7) do { \
  217. LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3); \
  218. LD_B4(RTYPE, psrc + 4 * stride, stride, out4, out5, out6, out7); \
  219. } while (0)
  220. #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
  221. #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
  222. /* Description : Load vectors with 8 halfword elements with stride
  223. * Arguments : Inputs - psrc, stride
  224. * Outputs - out0, out1
  225. * Details : Load 8 halfword elements in 'out0' from (psrc)
  226. * Load 8 halfword elements in 'out1' from (psrc + stride)
  227. */
  228. #define LD_H2(RTYPE, psrc, stride, out0, out1) do { \
  229. out0 = LD_H(RTYPE, psrc); \
  230. out1 = LD_H(RTYPE, psrc + stride); \
  231. } while (0)
  232. #define LD_UH2(...) LD_H2(v8u16, __VA_ARGS__)
  233. #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
  234. /* Description : Load vectors with 4 word elements with stride
  235. * Arguments : Inputs - psrc, stride
  236. * Outputs - out0, out1, out2, out3
  237. * Details : Load 4 word elements in 'out0' from (psrc + 0 * stride)
  238. * Load 4 word elements in 'out1' from (psrc + 1 * stride)
  239. * Load 4 word elements in 'out2' from (psrc + 2 * stride)
  240. * Load 4 word elements in 'out3' from (psrc + 3 * stride)
  241. */
  242. #define LD_W2(RTYPE, psrc, stride, out0, out1) do { \
  243. out0 = LD_W(RTYPE, psrc); \
  244. out1 = LD_W(RTYPE, psrc + stride); \
  245. } while (0)
  246. #define LD_UW2(...) LD_W2(v4u32, __VA_ARGS__)
  247. #define LD_SW2(...) LD_W2(v4i32, __VA_ARGS__)
  248. #define LD_W3(RTYPE, psrc, stride, out0, out1, out2) do { \
  249. LD_W2(RTYPE, psrc, stride, out0, out1); \
  250. out2 = LD_W(RTYPE, psrc + 2 * stride); \
  251. } while (0)
  252. #define LD_UW3(...) LD_W3(v4u32, __VA_ARGS__)
  253. #define LD_SW3(...) LD_W3(v4i32, __VA_ARGS__)
  254. #define LD_W4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
  255. LD_W2(RTYPE, psrc, stride, out0, out1); \
  256. LD_W2(RTYPE, psrc + 2 * stride, stride, out2, out3); \
  257. } while (0)
  258. #define LD_UW4(...) LD_W4(v4u32, __VA_ARGS__)
  259. #define LD_SW4(...) LD_W4(v4i32, __VA_ARGS__)
  260. /* Description : Store vectors of 16 byte elements with stride
  261. * Arguments : Inputs - in0, in1, pdst, stride
  262. * Details : Store 16 byte elements from 'in0' to (pdst)
  263. * Store 16 byte elements from 'in1' to (pdst + stride)
  264. */
  265. #define ST_B2(RTYPE, in0, in1, pdst, stride) do { \
  266. ST_B(RTYPE, in0, pdst); \
  267. ST_B(RTYPE, in1, pdst + stride); \
  268. } while (0)
  269. #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
  270. #define ST_SB2(...) ST_B2(v16i8, __VA_ARGS__)
  271. #define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
  272. ST_B2(RTYPE, in0, in1, pdst, stride); \
  273. ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
  274. } while (0)
  275. #define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
  276. #define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
  277. #define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
  278. pdst, stride) do { \
  279. ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
  280. ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
  281. } while (0)
  282. #define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)
  283. /* Description : Store vectors of 4 word elements with stride
  284. * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
  285. * Details : Store 4 word elements from 'in0' to (pdst + 0 * stride)
  286. * Store 4 word elements from 'in1' to (pdst + 1 * stride)
  287. * Store 4 word elements from 'in2' to (pdst + 2 * stride)
  288. * Store 4 word elements from 'in3' to (pdst + 3 * stride)
  289. */
  290. #define ST_W2(RTYPE, in0, in1, pdst, stride) do { \
  291. ST_W(RTYPE, in0, pdst); \
  292. ST_W(RTYPE, in1, pdst + stride); \
  293. } while (0)
  294. #define ST_UW2(...) ST_W2(v4u32, __VA_ARGS__)
  295. #define ST_SW2(...) ST_W2(v4i32, __VA_ARGS__)
  296. #define ST_W3(RTYPE, in0, in1, in2, pdst, stride) do { \
  297. ST_W2(RTYPE, in0, in1, pdst, stride); \
  298. ST_W(RTYPE, in2, pdst + 2 * stride); \
  299. } while (0)
  300. #define ST_UW3(...) ST_W3(v4u32, __VA_ARGS__)
  301. #define ST_SW3(...) ST_W3(v4i32, __VA_ARGS__)
  302. #define ST_W4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
  303. ST_W2(RTYPE, in0, in1, pdst, stride); \
  304. ST_W2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
  305. } while (0)
  306. #define ST_UW4(...) ST_W4(v4u32, __VA_ARGS__)
  307. #define ST_SW4(...) ST_W4(v4i32, __VA_ARGS__)
  308. /* Description : Store vectors of 8 halfword elements with stride
  309. * Arguments : Inputs - in0, in1, pdst, stride
  310. * Details : Store 8 halfword elements from 'in0' to (pdst)
  311. * Store 8 halfword elements from 'in1' to (pdst + stride)
  312. */
  313. #define ST_H2(RTYPE, in0, in1, pdst, stride) do { \
  314. ST_H(RTYPE, in0, pdst); \
  315. ST_H(RTYPE, in1, pdst + stride); \
  316. } while (0)
  317. #define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__)
  318. #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
  319. /* Description : Store 2x4 byte block to destination memory from input vector
  320. * Arguments : Inputs - in, stidx, pdst, stride
  321. * Details : Index 'stidx' halfword element from 'in' vector is copied to
  322. * the GP register and stored to (pdst)
  323. * Index 'stidx+1' halfword element from 'in' vector is copied to
  324. * the GP register and stored to (pdst + stride)
  325. * Index 'stidx+2' halfword element from 'in' vector is copied to
  326. * the GP register and stored to (pdst + 2 * stride)
  327. * Index 'stidx+3' halfword element from 'in' vector is copied to
  328. * the GP register and stored to (pdst + 3 * stride)
  329. */
  330. #define ST2x4_UB(in, stidx, pdst, stride) do { \
  331. uint8_t* pblk_2x4_m = (uint8_t*)pdst; \
  332. const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
  333. const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
  334. const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
  335. const uint16_t out3_m = __msa_copy_s_h((v8i16)in, stidx + 3); \
  336. SH(out0_m, pblk_2x4_m); \
  337. pblk_2x4_m += stride; \
  338. SH(out1_m, pblk_2x4_m); \
  339. pblk_2x4_m += stride; \
  340. SH(out2_m, pblk_2x4_m); \
  341. pblk_2x4_m += stride; \
  342. SH(out3_m, pblk_2x4_m); \
  343. } while (0)
  344. /* Description : Store 4x4 byte block to destination memory from input vector
  345. * Arguments : Inputs - in0, in1, pdst, stride
  346. * Details : 'Idx0' word element from input vector 'in0' is copied to the
  347. * GP register and stored to (pdst)
  348. * 'Idx1' word element from input vector 'in0' is copied to the
  349. * GP register and stored to (pdst + stride)
  350. * 'Idx2' word element from input vector 'in0' is copied to the
  351. * GP register and stored to (pdst + 2 * stride)
  352. * 'Idx3' word element from input vector 'in0' is copied to the
  353. * GP register and stored to (pdst + 3 * stride)
  354. */
  355. #define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) do { \
  356. uint8_t* const pblk_4x4_m = (uint8_t*)pdst; \
  357. const uint32_t out0_m = __msa_copy_s_w((v4i32)in0, idx0); \
  358. const uint32_t out1_m = __msa_copy_s_w((v4i32)in0, idx1); \
  359. const uint32_t out2_m = __msa_copy_s_w((v4i32)in1, idx2); \
  360. const uint32_t out3_m = __msa_copy_s_w((v4i32)in1, idx3); \
  361. SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
  362. } while (0)
  363. #define ST4x8_UB(in0, in1, pdst, stride) do { \
  364. uint8_t* const pblk_4x8 = (uint8_t*)pdst; \
  365. ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
  366. ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
  367. } while (0)
  368. /* Description : Immediate number of elements to slide
  369. * Arguments : Inputs - in0, in1, slide_val
  370. * Outputs - out
  371. * Return Type - as per RTYPE
  372. * Details : Byte elements from 'in1' vector are slid into 'in0' by
  373. * value specified in the 'slide_val'
  374. */
  375. #define SLDI_B(RTYPE, in0, in1, slide_val) \
  376. (RTYPE)__msa_sldi_b((v16i8)in0, (v16i8)in1, slide_val) \
  377. #define SLDI_UB(...) SLDI_B(v16u8, __VA_ARGS__)
  378. #define SLDI_SB(...) SLDI_B(v16i8, __VA_ARGS__)
  379. #define SLDI_SH(...) SLDI_B(v8i16, __VA_ARGS__)
  380. /* Description : Shuffle byte vector elements as per mask vector
  381. * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
  382. * Outputs - out0, out1
  383. * Return Type - as per RTYPE
  384. * Details : Byte elements from 'in0' & 'in1' are copied selectively to
  385. * 'out0' as per control vector 'mask0'
  386. */
  387. #define VSHF_B(RTYPE, in0, in1, mask) \
  388. (RTYPE)__msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0)
  389. #define VSHF_UB(...) VSHF_B(v16u8, __VA_ARGS__)
  390. #define VSHF_SB(...) VSHF_B(v16i8, __VA_ARGS__)
  391. #define VSHF_UH(...) VSHF_B(v8u16, __VA_ARGS__)
  392. #define VSHF_SH(...) VSHF_B(v8i16, __VA_ARGS__)
  393. #define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
  394. out0 = VSHF_B(RTYPE, in0, in1, mask0); \
  395. out1 = VSHF_B(RTYPE, in2, in3, mask1); \
  396. } while (0)
  397. #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
  398. #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
  399. #define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
  400. #define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)
  401. /* Description : Shuffle halfword vector elements as per mask vector
  402. * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
  403. * Outputs - out0, out1
  404. * Return Type - as per RTYPE
  405. * Details : halfword elements from 'in0' & 'in1' are copied selectively to
  406. * 'out0' as per control vector 'mask0'
  407. */
  408. #define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
  409. out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
  410. out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \
  411. } while (0)
  412. #define VSHF_H2_UH(...) VSHF_H2(v8u16, __VA_ARGS__)
  413. #define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)
  414. /* Description : Dot product of byte vector elements
  415. * Arguments : Inputs - mult0, mult1, cnst0, cnst1
  416. * Outputs - out0, out1
  417. * Return Type - as per RTYPE
  418. * Details : Signed byte elements from 'mult0' are multiplied with
  419. * signed byte elements from 'cnst0' producing a result
  420. * twice the size of input i.e. signed halfword.
  421. * The multiplication result of adjacent odd-even elements
  422. * are added together and written to the 'out0' vector
  423. */
  424. #define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  425. out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
  426. out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
  427. } while (0)
  428. #define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
  429. /* Description : Dot product of halfword vector elements
  430. * Arguments : Inputs - mult0, mult1, cnst0, cnst1
  431. * Outputs - out0, out1
  432. * Return Type - as per RTYPE
  433. * Details : Signed halfword elements from 'mult0' are multiplied with
  434. * signed halfword elements from 'cnst0' producing a result
  435. * twice the size of input i.e. signed word.
  436. * The multiplication result of adjacent odd-even elements
  437. * are added together and written to the 'out0' vector
  438. */
  439. #define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  440. out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
  441. out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
  442. } while (0)
  443. #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)
  444. /* Description : Dot product of unsigned word vector elements
  445. * Arguments : Inputs - mult0, mult1, cnst0, cnst1
  446. * Outputs - out0, out1
  447. * Return Type - as per RTYPE
  448. * Details : Unsigned word elements from 'mult0' are multiplied with
  449. * unsigned word elements from 'cnst0' producing a result
  450. * twice the size of input i.e. unsigned double word.
  451. * The multiplication result of adjacent odd-even elements
  452. * are added together and written to the 'out0' vector
  453. */
  454. #define DOTP_UW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  455. out0 = (RTYPE)__msa_dotp_u_d((v4u32)mult0, (v4u32)cnst0); \
  456. out1 = (RTYPE)__msa_dotp_u_d((v4u32)mult1, (v4u32)cnst1); \
  457. } while (0)
  458. #define DOTP_UW2_UD(...) DOTP_UW2(v2u64, __VA_ARGS__)
  459. /* Description : Dot product & addition of halfword vector elements
  460. * Arguments : Inputs - mult0, mult1, cnst0, cnst1
  461. * Outputs - out0, out1
  462. * Return Type - as per RTYPE
  463. * Details : Signed halfword elements from 'mult0' are multiplied with
  464. * signed halfword elements from 'cnst0' producing a result
  465. * twice the size of input i.e. signed word.
  466. * The multiplication result of adjacent odd-even elements
  467. * are added to the 'out0' vector
  468. */
  469. #define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  470. out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
  471. out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
  472. } while (0)
  473. #define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
  474. /* Description : Clips all signed halfword elements of input vector
  475. * between 0 & 255
  476. * Arguments : Input/output - val
  477. * Return Type - signed halfword
  478. */
  479. #define CLIP_SH_0_255(val) do { \
  480. const v8i16 max_m = __msa_ldi_h(255); \
  481. val = __msa_maxi_s_h((v8i16)val, 0); \
  482. val = __msa_min_s_h(max_m, (v8i16)val); \
  483. } while (0)
  484. #define CLIP_SH2_0_255(in0, in1) do { \
  485. CLIP_SH_0_255(in0); \
  486. CLIP_SH_0_255(in1); \
  487. } while (0)
  488. #define CLIP_SH4_0_255(in0, in1, in2, in3) do { \
  489. CLIP_SH2_0_255(in0, in1); \
  490. CLIP_SH2_0_255(in2, in3); \
  491. } while (0)
  492. /* Description : Clips all unsigned halfword elements of input vector
  493. * between 0 & 255
  494. * Arguments : Input - in
  495. * Output - out_m
  496. * Return Type - unsigned halfword
  497. */
  498. #define CLIP_UH_0_255(in) do { \
  499. const v8u16 max_m = (v8u16)__msa_ldi_h(255); \
  500. in = __msa_maxi_u_h((v8u16) in, 0); \
  501. in = __msa_min_u_h((v8u16) max_m, (v8u16) in); \
  502. } while (0)
  503. #define CLIP_UH2_0_255(in0, in1) do { \
  504. CLIP_UH_0_255(in0); \
  505. CLIP_UH_0_255(in1); \
  506. } while (0)
  507. /* Description : Clips all signed word elements of input vector
  508. * between 0 & 255
  509. * Arguments : Input/output - val
  510. * Return Type - signed word
  511. */
  512. #define CLIP_SW_0_255(val) do { \
  513. const v4i32 max_m = __msa_ldi_w(255); \
  514. val = __msa_maxi_s_w((v4i32)val, 0); \
  515. val = __msa_min_s_w(max_m, (v4i32)val); \
  516. } while (0)
  517. #define CLIP_SW4_0_255(in0, in1, in2, in3) do { \
  518. CLIP_SW_0_255(in0); \
  519. CLIP_SW_0_255(in1); \
  520. CLIP_SW_0_255(in2); \
  521. CLIP_SW_0_255(in3); \
  522. } while (0)
  523. /* Description : Horizontal addition of 4 signed word elements of input vector
  524. * Arguments : Input - in (signed word vector)
  525. * Output - sum_m (i32 sum)
  526. * Return Type - signed word (GP)
  527. * Details : 4 signed word elements of 'in' vector are added together and
  528. * the resulting integer sum is returned
  529. */
  530. static WEBP_INLINE int32_t func_hadd_sw_s32(v4i32 in) {
  531. const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);
  532. const v2i64 res1_m = __msa_splati_d(res0_m, 1);
  533. const v2i64 out = res0_m + res1_m;
  534. int32_t sum_m = __msa_copy_s_w((v4i32)out, 0);
  535. return sum_m;
  536. }
  537. #define HADD_SW_S32(in) func_hadd_sw_s32(in)
  538. /* Description : Horizontal addition of 8 signed halfword elements
  539. * Arguments : Input - in (signed halfword vector)
  540. * Output - sum_m (s32 sum)
  541. * Return Type - signed word
  542. * Details : 8 signed halfword elements of input vector are added
  543. * together and the resulting integer sum is returned
  544. */
  545. static WEBP_INLINE int32_t func_hadd_sh_s32(v8i16 in) {
  546. const v4i32 res = __msa_hadd_s_w(in, in);
  547. const v2i64 res0 = __msa_hadd_s_d(res, res);
  548. const v2i64 res1 = __msa_splati_d(res0, 1);
  549. const v2i64 res2 = res0 + res1;
  550. const int32_t sum_m = __msa_copy_s_w((v4i32)res2, 0);
  551. return sum_m;
  552. }
  553. #define HADD_SH_S32(in) func_hadd_sh_s32(in)
  554. /* Description : Horizontal addition of 8 unsigned halfword elements
  555. * Arguments : Input - in (unsigned halfword vector)
  556. * Output - sum_m (u32 sum)
  557. * Return Type - unsigned word
  558. * Details : 8 unsigned halfword elements of input vector are added
  559. * together and the resulting integer sum is returned
  560. */
  561. static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) {
  562. uint32_t sum_m;
  563. const v4u32 res_m = __msa_hadd_u_w(in, in);
  564. v2u64 res0_m = __msa_hadd_u_d(res_m, res_m);
  565. v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);
  566. res0_m = res0_m + res1_m;
  567. sum_m = __msa_copy_s_w((v4i32)res0_m, 0);
  568. return sum_m;
  569. }
  570. #define HADD_UH_U32(in) func_hadd_uh_u32(in)
  571. /* Description : Horizontal addition of signed half word vector elements
  572. Arguments : Inputs - in0, in1
  573. Outputs - out0, out1
  574. Return Type - as per RTYPE
  575. Details : Each signed odd half word element from 'in0' is added to
  576. even signed half word element from 'in0' (pairwise) and the
  577. halfword result is written in 'out0'
  578. */
  579. #define HADD_SH2(RTYPE, in0, in1, out0, out1) do { \
  580. out0 = (RTYPE)__msa_hadd_s_w((v8i16)in0, (v8i16)in0); \
  581. out1 = (RTYPE)__msa_hadd_s_w((v8i16)in1, (v8i16)in1); \
  582. } while (0)
  583. #define HADD_SH2_SW(...) HADD_SH2(v4i32, __VA_ARGS__)
  584. #define HADD_SH4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) do { \
  585. HADD_SH2(RTYPE, in0, in1, out0, out1); \
  586. HADD_SH2(RTYPE, in2, in3, out2, out3); \
  587. } while (0)
  588. #define HADD_SH4_SW(...) HADD_SH4(v4i32, __VA_ARGS__)
  589. /* Description : Horizontal subtraction of unsigned byte vector elements
  590. * Arguments : Inputs - in0, in1
  591. * Outputs - out0, out1
  592. * Return Type - as per RTYPE
  593. * Details : Each unsigned odd byte element from 'in0' is subtracted from
  594. * even unsigned byte element from 'in0' (pairwise) and the
  595. * halfword result is written to 'out0'
  596. */
  597. #define HSUB_UB2(RTYPE, in0, in1, out0, out1) do { \
  598. out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
  599. out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
  600. } while (0)
  601. #define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
  602. #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
  603. #define HSUB_UB2_SW(...) HSUB_UB2(v4i32, __VA_ARGS__)
  604. /* Description : Set element n input vector to GPR value
  605. * Arguments : Inputs - in0, in1, in2, in3
  606. * Output - out
  607. * Return Type - as per RTYPE
  608. * Details : Set element 0 in vector 'out' to value specified in 'in0'
  609. */
  610. #define INSERT_W2(RTYPE, in0, in1, out) do { \
  611. out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
  612. out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
  613. } while (0)
  614. #define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
  615. #define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
  616. #define INSERT_W4(RTYPE, in0, in1, in2, in3, out) do { \
  617. out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
  618. out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
  619. out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
  620. out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
  621. } while (0)
  622. #define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
  623. #define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
  624. #define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)
  625. /* Description : Set element n of double word input vector to GPR value
  626. * Arguments : Inputs - in0, in1
  627. * Output - out
  628. * Return Type - as per RTYPE
  629. * Details : Set element 0 in vector 'out' to GPR value specified in 'in0'
  630. * Set element 1 in vector 'out' to GPR value specified in 'in1'
  631. */
  632. #define INSERT_D2(RTYPE, in0, in1, out) do { \
  633. out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
  634. out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
  635. } while (0)
  636. #define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
  637. #define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
  638. /* Description : Interleave even byte elements from vectors
  639. * Arguments : Inputs - in0, in1, in2, in3
  640. * Outputs - out0, out1
  641. * Return Type - as per RTYPE
  642. * Details : Even byte elements of 'in0' and 'in1' are interleaved
  643. * and written to 'out0'
  644. */
  645. #define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  646. out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
  647. out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
  648. } while (0)
  649. #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
  650. #define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
  651. #define ILVEV_B2_UH(...) ILVEV_B2(v8u16, __VA_ARGS__)
  652. #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
  653. #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
  654. /* Description : Interleave odd byte elements from vectors
  655. * Arguments : Inputs - in0, in1, in2, in3
  656. * Outputs - out0, out1
  657. * Return Type - as per RTYPE
  658. * Details : Odd byte elements of 'in0' and 'in1' are interleaved
  659. * and written to 'out0'
  660. */
  661. #define ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  662. out0 = (RTYPE)__msa_ilvod_b((v16i8)in1, (v16i8)in0); \
  663. out1 = (RTYPE)__msa_ilvod_b((v16i8)in3, (v16i8)in2); \
  664. } while (0)
  665. #define ILVOD_B2_UB(...) ILVOD_B2(v16u8, __VA_ARGS__)
  666. #define ILVOD_B2_SB(...) ILVOD_B2(v16i8, __VA_ARGS__)
  667. #define ILVOD_B2_UH(...) ILVOD_B2(v8u16, __VA_ARGS__)
  668. #define ILVOD_B2_SH(...) ILVOD_B2(v8i16, __VA_ARGS__)
  669. #define ILVOD_B2_SD(...) ILVOD_B2(v2i64, __VA_ARGS__)
  670. /* Description : Interleave even halfword elements from vectors
  671. * Arguments : Inputs - in0, in1, in2, in3
  672. * Outputs - out0, out1
  673. * Return Type - as per RTYPE
  674. * Details : Even halfword elements of 'in0' and 'in1' are interleaved
  675. * and written to 'out0'
  676. */
  677. #define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  678. out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
  679. out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
  680. } while (0)
  681. #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
  682. #define ILVEV_H2_UH(...) ILVEV_H2(v8u16, __VA_ARGS__)
  683. #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
  684. #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
  685. /* Description : Interleave odd halfword elements from vectors
  686. * Arguments : Inputs - in0, in1, in2, in3
  687. * Outputs - out0, out1
  688. * Return Type - as per RTYPE
  689. * Details : Odd halfword elements of 'in0' and 'in1' are interleaved
  690. * and written to 'out0'
  691. */
  692. #define ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  693. out0 = (RTYPE)__msa_ilvod_h((v8i16)in1, (v8i16)in0); \
  694. out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \
  695. } while (0)
  696. #define ILVOD_H2_UB(...) ILVOD_H2(v16u8, __VA_ARGS__)
  697. #define ILVOD_H2_UH(...) ILVOD_H2(v8u16, __VA_ARGS__)
  698. #define ILVOD_H2_SH(...) ILVOD_H2(v8i16, __VA_ARGS__)
  699. #define ILVOD_H2_SW(...) ILVOD_H2(v4i32, __VA_ARGS__)
  700. /* Description : Interleave even word elements from vectors
  701. * Arguments : Inputs - in0, in1, in2, in3
  702. * Outputs - out0, out1
  703. * Return Type - as per RTYPE
  704. * Details : Even word elements of 'in0' and 'in1' are interleaved
  705. * and written to 'out0'
  706. */
  707. #define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  708. out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
  709. out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
  710. } while (0)
  711. #define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
  712. #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
  713. #define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
  714. #define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
  715. /* Description : Interleave even-odd word elements from vectors
  716. * Arguments : Inputs - in0, in1, in2, in3
  717. * Outputs - out0, out1
  718. * Return Type - as per RTYPE
  719. * Details : Even word elements of 'in0' and 'in1' are interleaved
  720. * and written to 'out0'
  721. * Odd word elements of 'in2' and 'in3' are interleaved
  722. * and written to 'out1'
  723. */
  724. #define ILVEVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  725. out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
  726. out1 = (RTYPE)__msa_ilvod_w((v4i32)in3, (v4i32)in2); \
  727. } while (0)
  728. #define ILVEVOD_W2_UB(...) ILVEVOD_W2(v16u8, __VA_ARGS__)
  729. #define ILVEVOD_W2_UH(...) ILVEVOD_W2(v8u16, __VA_ARGS__)
  730. #define ILVEVOD_W2_SH(...) ILVEVOD_W2(v8i16, __VA_ARGS__)
  731. #define ILVEVOD_W2_SW(...) ILVEVOD_W2(v4i32, __VA_ARGS__)
  732. /* Description : Interleave even-odd half-word elements from vectors
  733. * Arguments : Inputs - in0, in1, in2, in3
  734. * Outputs - out0, out1
  735. * Return Type - as per RTYPE
  736. * Details : Even half-word elements of 'in0' and 'in1' are interleaved
  737. * and written to 'out0'
  738. * Odd half-word elements of 'in2' and 'in3' are interleaved
  739. * and written to 'out1'
  740. */
  741. #define ILVEVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  742. out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
  743. out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \
  744. } while (0)
  745. #define ILVEVOD_H2_UB(...) ILVEVOD_H2(v16u8, __VA_ARGS__)
  746. #define ILVEVOD_H2_UH(...) ILVEVOD_H2(v8u16, __VA_ARGS__)
  747. #define ILVEVOD_H2_SH(...) ILVEVOD_H2(v8i16, __VA_ARGS__)
  748. #define ILVEVOD_H2_SW(...) ILVEVOD_H2(v4i32, __VA_ARGS__)
  749. /* Description : Interleave even double word elements from vectors
  750. * Arguments : Inputs - in0, in1, in2, in3
  751. * Outputs - out0, out1
  752. * Return Type - as per RTYPE
  753. * Details : Even double word elements of 'in0' and 'in1' are interleaved
  754. * and written to 'out0'
  755. */
  756. #define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  757. out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \
  758. out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \
  759. } while (0)
  760. #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
  761. #define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
  762. #define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
  763. #define ILVEV_D2_SD(...) ILVEV_D2(v2i64, __VA_ARGS__)
  764. /* Description : Interleave left half of byte elements from vectors
  765. * Arguments : Inputs - in0, in1, in2, in3
  766. * Outputs - out0, out1
  767. * Return Type - as per RTYPE
  768. * Details : Left half of byte elements of 'in0' and 'in1' are interleaved
  769. * and written to 'out0'.
  770. */
  771. #define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  772. out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
  773. out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \
  774. } while (0)
  775. #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
  776. #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
  777. #define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
  778. #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
  779. #define ILVL_B2_SW(...) ILVL_B2(v4i32, __VA_ARGS__)
  780. /* Description : Interleave right half of byte elements from vectors
  781. * Arguments : Inputs - in0, in1, in2, in3
  782. * Outputs - out0, out1
  783. * Return Type - as per RTYPE
  784. * Details : Right half of byte elements of 'in0' and 'in1' are interleaved
  785. * and written to out0.
  786. */
  787. #define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  788. out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
  789. out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
  790. } while (0)
  791. #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
  792. #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
  793. #define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
  794. #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
  795. #define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)
  796. #define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
  797. out0, out1, out2, out3) do { \
  798. ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
  799. ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
  800. } while (0)
  801. #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
  802. #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
  803. #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
  804. #define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
  805. #define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
  806. /* Description : Interleave right half of halfword elements from vectors
  807. * Arguments : Inputs - in0, in1, in2, in3
  808. * Outputs - out0, out1
  809. * Return Type - as per RTYPE
  810. * Details : Right half of halfword elements of 'in0' and 'in1' are
  811. * interleaved and written to 'out0'.
  812. */
  813. #define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  814. out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
  815. out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
  816. } while (0)
  817. #define ILVR_H2_UB(...) ILVR_H2(v16u8, __VA_ARGS__)
  818. #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
  819. #define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
  820. #define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
  821. out0, out1, out2, out3) do { \
  822. ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
  823. ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
  824. } while (0)
  825. #define ILVR_H4_UB(...) ILVR_H4(v16u8, __VA_ARGS__)
  826. #define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
  827. #define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
  828. /* Description : Interleave right half of double word elements from vectors
  829. * Arguments : Inputs - in0, in1, in2, in3
  830. * Outputs - out0, out1
  831. * Return Type - as per RTYPE
  832. * Details : Right half of double word elements of 'in0' and 'in1' are
  833. * interleaved and written to 'out0'.
  834. */
  835. #define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  836. out0 = (RTYPE)__msa_ilvr_d((v2i64)in0, (v2i64)in1); \
  837. out1 = (RTYPE)__msa_ilvr_d((v2i64)in2, (v2i64)in3); \
  838. } while (0)
  839. #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
  840. #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
  841. #define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
  842. #define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
  843. out0, out1, out2, out3) do { \
  844. ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \
  845. ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \
  846. } while (0)
  847. #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
  848. #define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
  849. /* Description : Interleave both left and right half of input vectors
  850. * Arguments : Inputs - in0, in1
  851. * Outputs - out0, out1
  852. * Return Type - as per RTYPE
  853. * Details : Right half of byte elements from 'in0' and 'in1' are
  854. * interleaved and written to 'out0'
  855. */
  856. #define ILVRL_B2(RTYPE, in0, in1, out0, out1) do { \
  857. out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
  858. out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
  859. } while (0)
  860. #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
  861. #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
  862. #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
  863. #define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
  864. #define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)
  865. #define ILVRL_H2(RTYPE, in0, in1, out0, out1) do { \
  866. out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
  867. out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
  868. } while (0)
  869. #define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
  870. #define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
  871. #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
  872. #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
  873. #define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__)
  874. #define ILVRL_W2(RTYPE, in0, in1, out0, out1) do { \
  875. out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
  876. out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
  877. } while (0)
  878. #define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
  879. #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
  880. #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
  881. #define ILVRL_W2_UW(...) ILVRL_W2(v4u32, __VA_ARGS__)
  882. /* Description : Pack even byte elements of vector pairs
  883. * Arguments : Inputs - in0, in1, in2, in3
  884. * Outputs - out0, out1
  885. * Return Type - as per RTYPE
  886. * Details : Even byte elements of 'in0' are copied to the left half of
  887. * 'out0' & even byte elements of 'in1' are copied to the right
  888. * half of 'out0'.
  889. */
  890. #define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  891. out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
  892. out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
  893. } while (0)
  894. #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
  895. #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
  896. #define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
  897. #define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)
  898. #define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
  899. out0, out1, out2, out3) do { \
  900. PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
  901. PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
  902. } while (0)
  903. #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
  904. #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
  905. #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
  906. #define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
  907. /* Description : Pack even halfword elements of vector pairs
  908. * Arguments : Inputs - in0, in1, in2, in3
  909. * Outputs - out0, out1
  910. * Return Type - as per RTYPE
  911. * Details : Even halfword elements of 'in0' are copied to the left half of
  912. * 'out0' & even halfword elements of 'in1' are copied to the
  913. * right half of 'out0'.
  914. */
  915. #define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  916. out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
  917. out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
  918. } while (0)
  919. #define PCKEV_H2_UH(...) PCKEV_H2(v8u16, __VA_ARGS__)
  920. #define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
  921. #define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
  922. #define PCKEV_H2_UW(...) PCKEV_H2(v4u32, __VA_ARGS__)
  923. /* Description : Pack even word elements of vector pairs
  924. * Arguments : Inputs - in0, in1, in2, in3
  925. * Outputs - out0, out1
  926. * Return Type - as per RTYPE
  927. * Details : Even word elements of 'in0' are copied to the left half of
  928. * 'out0' & even word elements of 'in1' are copied to the
  929. * right half of 'out0'.
  930. */
  931. #define PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  932. out0 = (RTYPE)__msa_pckev_w((v4i32)in0, (v4i32)in1); \
  933. out1 = (RTYPE)__msa_pckev_w((v4i32)in2, (v4i32)in3); \
  934. } while (0)
  935. #define PCKEV_W2_UH(...) PCKEV_W2(v8u16, __VA_ARGS__)
  936. #define PCKEV_W2_SH(...) PCKEV_W2(v8i16, __VA_ARGS__)
  937. #define PCKEV_W2_SW(...) PCKEV_W2(v4i32, __VA_ARGS__)
  938. #define PCKEV_W2_UW(...) PCKEV_W2(v4u32, __VA_ARGS__)
  939. /* Description : Pack odd halfword elements of vector pairs
  940. * Arguments : Inputs - in0, in1, in2, in3
  941. * Outputs - out0, out1
  942. * Return Type - as per RTYPE
  943. * Details : Odd halfword elements of 'in0' are copied to the left half of
  944. * 'out0' & odd halfword elements of 'in1' are copied to the
  945. * right half of 'out0'.
  946. */
  947. #define PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  948. out0 = (RTYPE)__msa_pckod_h((v8i16)in0, (v8i16)in1); \
  949. out1 = (RTYPE)__msa_pckod_h((v8i16)in2, (v8i16)in3); \
  950. } while (0)
  951. #define PCKOD_H2_UH(...) PCKOD_H2(v8u16, __VA_ARGS__)
  952. #define PCKOD_H2_SH(...) PCKOD_H2(v8i16, __VA_ARGS__)
  953. #define PCKOD_H2_SW(...) PCKOD_H2(v4i32, __VA_ARGS__)
  954. #define PCKOD_H2_UW(...) PCKOD_H2(v4u32, __VA_ARGS__)
  955. /* Description : Arithmetic immediate shift right all elements of word vector
  956. * Arguments : Inputs - in0, in1, shift
  957. * Outputs - in place operation
  958. * Return Type - as per input vector RTYPE
  959. * Details : Each element of vector 'in0' is right shifted by 'shift' and
  960. * the result is written in-place. 'shift' is a GP variable.
  961. */
  962. #define SRAI_W2(RTYPE, in0, in1, shift_val) do { \
  963. in0 = (RTYPE)SRAI_W(in0, shift_val); \
  964. in1 = (RTYPE)SRAI_W(in1, shift_val); \
  965. } while (0)
  966. #define SRAI_W2_SW(...) SRAI_W2(v4i32, __VA_ARGS__)
  967. #define SRAI_W2_UW(...) SRAI_W2(v4u32, __VA_ARGS__)
  968. #define SRAI_W4(RTYPE, in0, in1, in2, in3, shift_val) do { \
  969. SRAI_W2(RTYPE, in0, in1, shift_val); \
  970. SRAI_W2(RTYPE, in2, in3, shift_val); \
  971. } while (0)
  972. #define SRAI_W4_SW(...) SRAI_W4(v4i32, __VA_ARGS__)
  973. #define SRAI_W4_UW(...) SRAI_W4(v4u32, __VA_ARGS__)
  974. /* Description : Arithmetic shift right all elements of half-word vector
  975. * Arguments : Inputs - in0, in1, shift
  976. * Outputs - in place operation
  977. * Return Type - as per input vector RTYPE
  978. * Details : Each element of vector 'in0' is right shifted by 'shift' and
  979. * the result is written in-place. 'shift' is a GP variable.
  980. */
  981. #define SRAI_H2(RTYPE, in0, in1, shift_val) do { \
  982. in0 = (RTYPE)SRAI_H(in0, shift_val); \
  983. in1 = (RTYPE)SRAI_H(in1, shift_val); \
  984. } while (0)
  985. #define SRAI_H2_SH(...) SRAI_H2(v8i16, __VA_ARGS__)
  986. #define SRAI_H2_UH(...) SRAI_H2(v8u16, __VA_ARGS__)
  987. /* Description : Arithmetic rounded shift right all elements of word vector
  988. * Arguments : Inputs - in0, in1, shift
  989. * Outputs - in place operation
  990. * Return Type - as per input vector RTYPE
  991. * Details : Each element of vector 'in0' is right shifted by 'shift' and
  992. * the result is written in-place. 'shift' is a GP variable.
  993. */
  994. #define SRARI_W2(RTYPE, in0, in1, shift) do { \
  995. in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
  996. in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
  997. } while (0)
  998. #define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)
  999. #define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) do { \
  1000. SRARI_W2(RTYPE, in0, in1, shift); \
  1001. SRARI_W2(RTYPE, in2, in3, shift); \
  1002. } while (0)
  1003. #define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
  1004. #define SRARI_W4_UW(...) SRARI_W4(v4u32, __VA_ARGS__)
  1005. #define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
  1006. /* Description : Shift right arithmetic rounded double words
  1007. * Arguments : Inputs - in0, in1, shift
  1008. * Outputs - in place operation
  1009. * Return Type - as per RTYPE
  1010. * Details : Each element of vector 'in0' is shifted right arithmetically by
  1011. * the number of bits in the corresponding element in the vector
  1012. * 'shift'. The last discarded bit is added to shifted value for
  1013. * rounding and the result is written in-place.
  1014. * 'shift' is a vector.
  1015. */
  1016. #define SRAR_D2(RTYPE, in0, in1, shift) do { \
  1017. in0 = (RTYPE)__msa_srar_d((v2i64)in0, (v2i64)shift); \
  1018. in1 = (RTYPE)__msa_srar_d((v2i64)in1, (v2i64)shift); \
  1019. } while (0)
  1020. #define SRAR_D2_SW(...) SRAR_D2(v4i32, __VA_ARGS__)
  1021. #define SRAR_D2_SD(...) SRAR_D2(v2i64, __VA_ARGS__)
  1022. #define SRAR_D2_UD(...) SRAR_D2(v2u64, __VA_ARGS__)
  1023. #define SRAR_D4(RTYPE, in0, in1, in2, in3, shift) do { \
  1024. SRAR_D2(RTYPE, in0, in1, shift); \
  1025. SRAR_D2(RTYPE, in2, in3, shift); \
  1026. } while (0)
  1027. #define SRAR_D4_SD(...) SRAR_D4(v2i64, __VA_ARGS__)
  1028. #define SRAR_D4_UD(...) SRAR_D4(v2u64, __VA_ARGS__)
  1029. /* Description : Addition of 2 pairs of half-word vectors
  1030. * Arguments : Inputs - in0, in1, in2, in3
  1031. * Outputs - out0, out1
  1032. * Details : Each element in 'in0' is added to 'in1' and result is written
  1033. * to 'out0'.
  1034. */
  1035. #define ADDVI_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  1036. out0 = (RTYPE)ADDVI_H(in0, in1); \
  1037. out1 = (RTYPE)ADDVI_H(in2, in3); \
  1038. } while (0)
  1039. #define ADDVI_H2_SH(...) ADDVI_H2(v8i16, __VA_ARGS__)
  1040. #define ADDVI_H2_UH(...) ADDVI_H2(v8u16, __VA_ARGS__)
  1041. /* Description : Addition of 2 pairs of word vectors
  1042. * Arguments : Inputs - in0, in1, in2, in3
  1043. * Outputs - out0, out1
  1044. * Details : Each element in 'in0' is added to 'in1' and result is written
  1045. * to 'out0'.
  1046. */
  1047. #define ADDVI_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  1048. out0 = (RTYPE)ADDVI_W(in0, in1); \
  1049. out1 = (RTYPE)ADDVI_W(in2, in3); \
  1050. } while (0)
  1051. #define ADDVI_W2_SW(...) ADDVI_W2(v4i32, __VA_ARGS__)
  1052. /* Description : Fill 2 pairs of word vectors with GP registers
  1053. * Arguments : Inputs - in0, in1
  1054. * Outputs - out0, out1
  1055. * Details : GP register in0 is replicated in each word element of out0
  1056. * GP register in1 is replicated in each word element of out1
  1057. */
  1058. #define FILL_W2(RTYPE, in0, in1, out0, out1) do { \
  1059. out0 = (RTYPE)__msa_fill_w(in0); \
  1060. out1 = (RTYPE)__msa_fill_w(in1); \
  1061. } while (0)
  1062. #define FILL_W2_SW(...) FILL_W2(v4i32, __VA_ARGS__)
  1063. /* Description : Addition of 2 pairs of vectors
  1064. * Arguments : Inputs - in0, in1, in2, in3
  1065. * Outputs - out0, out1
  1066. * Details : Each element in 'in0' is added to 'in1' and result is written
  1067. * to 'out0'.
  1068. */
  1069. #define ADD2(in0, in1, in2, in3, out0, out1) do { \
  1070. out0 = in0 + in1; \
  1071. out1 = in2 + in3; \
  1072. } while (0)
  1073. #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \
  1074. out0, out1, out2, out3) do { \
  1075. ADD2(in0, in1, in2, in3, out0, out1); \
  1076. ADD2(in4, in5, in6, in7, out2, out3); \
  1077. } while (0)
  1078. /* Description : Subtraction of 2 pairs of vectors
  1079. * Arguments : Inputs - in0, in1, in2, in3
  1080. * Outputs - out0, out1
  1081. * Details : Each element in 'in1' is subtracted from 'in0' and result is
  1082. * written to 'out0'.
  1083. */
  1084. #define SUB2(in0, in1, in2, in3, out0, out1) do { \
  1085. out0 = in0 - in1; \
  1086. out1 = in2 - in3; \
  1087. } while (0)
  1088. #define SUB3(in0, in1, in2, in3, in4, in5, out0, out1, out2) do { \
  1089. out0 = in0 - in1; \
  1090. out1 = in2 - in3; \
  1091. out2 = in4 - in5; \
  1092. } while (0)
  1093. #define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, \
  1094. out0, out1, out2, out3) do { \
  1095. out0 = in0 - in1; \
  1096. out1 = in2 - in3; \
  1097. out2 = in4 - in5; \
  1098. out3 = in6 - in7; \
  1099. } while (0)
  1100. /* Description : Addition - Subtraction of input vectors
  1101. * Arguments : Inputs - in0, in1
  1102. * Outputs - out0, out1
  1103. * Details : Each element in 'in1' is added to 'in0' and result is
  1104. * written to 'out0'.
  1105. * Each element in 'in1' is subtracted from 'in0' and result is
  1106. * written to 'out1'.
  1107. */
  1108. #define ADDSUB2(in0, in1, out0, out1) do { \
  1109. out0 = in0 + in1; \
  1110. out1 = in0 - in1; \
  1111. } while (0)
  1112. /* Description : Multiplication of pairs of vectors
  1113. * Arguments : Inputs - in0, in1, in2, in3
  1114. * Outputs - out0, out1
  1115. * Details : Each element from 'in0' is multiplied with elements from 'in1'
  1116. * and the result is written to 'out0'
  1117. */
  1118. #define MUL2(in0, in1, in2, in3, out0, out1) do { \
  1119. out0 = in0 * in1; \
  1120. out1 = in2 * in3; \
  1121. } while (0)
  1122. #define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, \
  1123. out0, out1, out2, out3) do { \
  1124. MUL2(in0, in1, in2, in3, out0, out1); \
  1125. MUL2(in4, in5, in6, in7, out2, out3); \
  1126. } while (0)
  1127. /* Description : Sign extend halfword elements from right half of the vector
  1128. * Arguments : Input - in (halfword vector)
  1129. * Output - out (sign extended word vector)
  1130. * Return Type - signed word
  1131. * Details : Sign bit of halfword elements from input vector 'in' is
  1132. * extracted and interleaved with same vector 'in0' to generate
  1133. * 4 word elements keeping sign intact
  1134. */
  1135. #define UNPCK_R_SH_SW(in, out) do { \
  1136. const v8i16 sign_m = __msa_clti_s_h((v8i16)in, 0); \
  1137. out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \
  1138. } while (0)
  1139. /* Description : Sign extend halfword elements from input vector and return
  1140. * the result in pair of vectors
  1141. * Arguments : Input - in (halfword vector)
  1142. * Outputs - out0, out1 (sign extended word vectors)
  1143. * Return Type - signed word
  1144. * Details : Sign bit of halfword elements from input vector 'in' is
  1145. * extracted and interleaved right with same vector 'in0' to
  1146. * generate 4 signed word elements in 'out0'
  1147. * Then interleaved left with same vector 'in0' to
  1148. * generate 4 signed word elements in 'out1'
  1149. */
  1150. #define UNPCK_SH_SW(in, out0, out1) do { \
  1151. const v8i16 tmp_m = __msa_clti_s_h((v8i16)in, 0); \
  1152. ILVRL_H2_SW(tmp_m, in, out0, out1); \
  1153. } while (0)
  1154. /* Description : Butterfly of 4 input vectors
  1155. * Arguments : Inputs - in0, in1, in2, in3
  1156. * Outputs - out0, out1, out2, out3
  1157. * Details : Butterfly operation
  1158. */
  1159. #define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) do { \
  1160. out0 = in0 + in3; \
  1161. out1 = in1 + in2; \
  1162. out2 = in1 - in2; \
  1163. out3 = in0 - in3; \
  1164. } while (0)
  1165. /* Description : Transpose 16x4 block into 4x16 with byte elements in vectors
  1166. * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
  1167. * in8, in9, in10, in11, in12, in13, in14, in15
  1168. * Outputs - out0, out1, out2, out3
  1169. * Return Type - unsigned byte
  1170. */
  1171. #define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
  1172. in8, in9, in10, in11, in12, in13, in14, in15, \
  1173. out0, out1, out2, out3) do { \
  1174. v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m, tmp4_m, tmp5_m; \
  1175. ILVEV_W2_SD(in0, in4, in8, in12, tmp2_m, tmp3_m); \
  1176. ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \
  1177. ILVEV_D2_UB(tmp2_m, tmp3_m, tmp0_m, tmp1_m, out1, out3); \
  1178. ILVEV_W2_SD(in2, in6, in10, in14, tmp4_m, tmp5_m); \
  1179. ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \
  1180. ILVEV_D2_SD(tmp4_m, tmp5_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
  1181. ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
  1182. ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out0, out2); \
  1183. ILVOD_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \
  1184. ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out1, out3); \
  1185. } while (0)
  1186. /* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
  1187. * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
  1188. * in8, in9, in10, in11, in12, in13, in14, in15
  1189. * Outputs - out0, out1, out2, out3, out4, out5, out6, out7
  1190. * Return Type - unsigned byte
  1191. */
  1192. #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \
  1193. in8, in9, in10, in11, in12, in13, in14, in15, \
  1194. out0, out1, out2, out3, out4, out5, \
  1195. out6, out7) do { \
  1196. v8i16 tmp0_m, tmp1_m, tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
  1197. v4i32 tmp2_m, tmp3_m; \
  1198. ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
  1199. ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \
  1200. ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \
  1201. ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \
  1202. ILVEV_B2_SH(out7, out6, out5, out4, tmp0_m, tmp1_m); \
  1203. ILVOD_B2_SH(out7, out6, out5, out4, tmp4_m, tmp5_m); \
  1204. ILVEV_B2_UB(out3, out2, out1, out0, out5, out7); \
  1205. ILVOD_B2_SH(out3, out2, out1, out0, tmp6_m, tmp7_m); \
  1206. ILVEV_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
  1207. ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out0, out4); \
  1208. ILVOD_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \
  1209. ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out2, out6); \
  1210. ILVEV_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
  1211. ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out1, out5); \
  1212. ILVOD_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \
  1213. ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out3, out7); \
  1214. } while (0)
  1215. /* Description : Transpose 4x4 block with word elements in vectors
  1216. * Arguments : Inputs - in0, in1, in2, in3
  1217. * Outputs - out0, out1, out2, out3
  1218. * Return Type - as per RTYPE
  1219. */
  1220. #define TRANSPOSE4x4_W(RTYPE, in0, in1, in2, in3, \
  1221. out0, out1, out2, out3) do { \
  1222. v4i32 s0_m, s1_m, s2_m, s3_m; \
  1223. ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
  1224. ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
  1225. out0 = (RTYPE)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \
  1226. out1 = (RTYPE)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \
  1227. out2 = (RTYPE)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \
  1228. out3 = (RTYPE)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \
  1229. } while (0)
  1230. #define TRANSPOSE4x4_SW_SW(...) TRANSPOSE4x4_W(v4i32, __VA_ARGS__)
  1231. /* Description : Add block 4x4
  1232. * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
  1233. * Details : Least significant 4 bytes from each input vector are added to
  1234. * the destination bytes, clipped between 0-255 and stored.
  1235. */
  1236. #define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do { \
  1237. uint32_t src0_m, src1_m, src2_m, src3_m; \
  1238. v8i16 inp0_m, inp1_m, res0_m, res1_m; \
  1239. v16i8 dst0_m = { 0 }; \
  1240. v16i8 dst1_m = { 0 }; \
  1241. const v16i8 zero_m = { 0 }; \
  1242. ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m); \
  1243. LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \
  1244. INSERT_W2_SB(src0_m, src1_m, dst0_m); \
  1245. INSERT_W2_SB(src2_m, src3_m, dst1_m); \
  1246. ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \
  1247. ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \
  1248. CLIP_SH2_0_255(res0_m, res1_m); \
  1249. PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \
  1250. ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride); \
  1251. } while (0)
  1252. /* Description : Pack even byte elements, extract 0 & 2 index words from pair
  1253. * of results and store 4 words in destination memory as per
  1254. * stride
  1255. * Arguments : Inputs - in0, in1, in2, in3, pdst, stride
  1256. */
  1257. #define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do { \
  1258. v16i8 tmp0_m, tmp1_m; \
  1259. PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m); \
  1260. ST4x4_UB(tmp0_m, tmp1_m, 0, 2, 0, 2, pdst, stride); \
  1261. } while (0)
  1262. /* Description : average with rounding (in0 + in1 + 1) / 2.
  1263. * Arguments : Inputs - in0, in1, in2, in3,
  1264. * Outputs - out0, out1
  1265. * Return Type - as per RTYPE
  1266. * Details : Each unsigned byte element from 'in0' vector is added with
  1267. * each unsigned byte element from 'in1' vector. Then the average
  1268. * with rounding is calculated and written to 'out0'
  1269. */
  1270. #define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  1271. out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \
  1272. out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \
  1273. } while (0)
  1274. #define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
  1275. #endif // WEBP_DSP_MSA_MACRO_H_