/*mmxloop.h: MMX loop-filter macros (MSVC inline assembly).*/
  1. #if !defined(_x86_vc_mmxloop_H)
  2. # define _x86_vc_mmxloop_H (1)
  3. # include <stddef.h>
  4. # include "x86int.h"
  5. #if defined(OC_X86_ASM)
  6. /*On entry, mm0={a0,...,a7}, mm1={b0,...,b7}, mm2={c0,...,c7}, mm3={d0,...d7}.
  7. On exit, mm1={b0+lflim(R_0,L),...,b7+lflim(R_7,L)} and
  8. mm2={c0-lflim(R_0,L),...,c7-lflim(R_7,L)}; mm0 and mm3 are clobbered.*/
  9. #define OC_LOOP_FILTER8_MMX __asm{ \
  10. /*mm7=0*/ \
  11. __asm pxor mm7,mm7 \
  12. /*mm6:mm0={a0,...,a7}*/ \
  13. __asm movq mm6,mm0 \
  14. __asm punpcklbw mm0,mm7 \
  15. __asm punpckhbw mm6,mm7 \
  16. /*mm3:mm5={d0,...,d7}*/ \
  17. __asm movq mm5,mm3 \
  18. __asm punpcklbw mm3,mm7 \
  19. __asm punpckhbw mm5,mm7 \
  20. /*mm6:mm0={a0-d0,...,a7-d7}*/ \
  21. __asm psubw mm0,mm3 \
  22. __asm psubw mm6,mm5 \
  23. /*mm3:mm1={b0,...,b7}*/ \
  24. __asm movq mm3,mm1 \
  25. __asm punpcklbw mm1,mm7 \
  26. __asm movq mm4,mm2 \
  27. __asm punpckhbw mm3,mm7 \
  28. /*mm5:mm4={c0,...,c7}*/ \
  29. __asm movq mm5,mm2 \
  30. __asm punpcklbw mm4,mm7 \
  31. __asm punpckhbw mm5,mm7 \
  32. /*mm7={3}x4 \
  33. mm5:mm4={c0-b0,...,c7-b7}*/ \
  34. __asm pcmpeqw mm7,mm7 \
  35. __asm psubw mm4,mm1 \
  36. __asm psrlw mm7,14 \
  37. __asm psubw mm5,mm3 \
  38. /*Scale by 3.*/ \
  39. __asm pmullw mm4,mm7 \
  40. __asm pmullw mm5,mm7 \
  41. /*mm7={4}x4 \
  42. mm5:mm4=f={a0-d0+3*(c0-b0),...,a7-d7+3*(c7-b7)}*/ \
  43. __asm psrlw mm7,1 \
  44. __asm paddw mm4,mm0 \
  45. __asm psllw mm7,2 \
  46. __asm movq mm0,[LL] \
  47. __asm paddw mm5,mm6 \
  48. /*R_i has the range [-127,128], so we compute -R_i instead. \
  49. mm4=-R_i=-(f+4>>3)=0xFF^(f-4>>3)*/ \
  50. __asm psubw mm4,mm7 \
  51. __asm psubw mm5,mm7 \
  52. __asm psraw mm4,3 \
  53. __asm psraw mm5,3 \
  54. __asm pcmpeqb mm7,mm7 \
  55. __asm packsswb mm4,mm5 \
  56. __asm pxor mm6,mm6 \
  57. __asm pxor mm4,mm7 \
  58. __asm packuswb mm1,mm3 \
  59. /*Now compute lflim of -mm4 cf. Section 7.10 of the sepc.*/ \
  60. /*There's no unsigned byte+signed byte with unsigned saturation op code, so \
  61. we have to split things by sign (the other option is to work in 16 bits, \
  62. but working in 8 bits gives much better parallelism). \
  63. We compute abs(R_i), but save a mask of which terms were negative in mm6. \
  64. Then we compute mm4=abs(lflim(R_i,L))=min(abs(R_i),max(2*L-abs(R_i),0)). \
  65. Finally, we split mm4 into positive and negative pieces using the mask in \
  66. mm6, and add and subtract them as appropriate.*/ \
  67. /*mm4=abs(-R_i)*/ \
  68. /*mm7=255-2*L*/ \
  69. __asm pcmpgtb mm6,mm4 \
  70. __asm psubb mm7,mm0 \
  71. __asm pxor mm4,mm6 \
  72. __asm psubb mm7,mm0 \
  73. __asm psubb mm4,mm6 \
  74. /*mm7=255-max(2*L-abs(R_i),0)*/ \
  75. __asm paddusb mm7,mm4 \
  76. /*mm4=min(abs(R_i),max(2*L-abs(R_i),0))*/ \
  77. __asm paddusb mm4,mm7 \
  78. __asm psubusb mm4,mm7 \
  79. /*Now split mm4 by the original sign of -R_i.*/ \
  80. __asm movq mm5,mm4 \
  81. __asm pand mm4,mm6 \
  82. __asm pandn mm6,mm5 \
  83. /*mm1={b0+lflim(R_0,L),...,b7+lflim(R_7,L)}*/ \
  84. /*mm2={c0-lflim(R_0,L),...,c7-lflim(R_7,L)}*/ \
  85. __asm paddusb mm1,mm4 \
  86. __asm psubusb mm2,mm4 \
  87. __asm psubusb mm1,mm6 \
  88. __asm paddusb mm2,mm6 \
  89. }
  90. #define OC_LOOP_FILTER_V_MMX(_pix,_ystride,_ll) \
  91. do{ \
  92. /*Used local variable pix__ in order to fix compilation errors like: \
  93. "error C2425: 'SHL' : non-constant expression in 'second operand'".*/ \
  94. unsigned char *pix__; \
  95. unsigned char *ll__; \
  96. ll__=(_ll); \
  97. pix__=(_pix); \
  98. __asm mov YSTRIDE,_ystride \
  99. __asm mov LL,ll__ \
  100. __asm mov PIX,pix__ \
  101. __asm sub PIX,YSTRIDE \
  102. __asm sub PIX,YSTRIDE \
  103. /*mm0={a0,...,a7}*/ \
  104. __asm movq mm0,[PIX] \
  105. /*ystride3=_ystride*3*/ \
  106. __asm lea YSTRIDE3,[YSTRIDE+YSTRIDE*2] \
  107. /*mm3={d0,...,d7}*/ \
  108. __asm movq mm3,[PIX+YSTRIDE3] \
  109. /*mm1={b0,...,b7}*/ \
  110. __asm movq mm1,[PIX+YSTRIDE] \
  111. /*mm2={c0,...,c7}*/ \
  112. __asm movq mm2,[PIX+YSTRIDE*2] \
  113. OC_LOOP_FILTER8_MMX \
  114. /*Write it back out.*/ \
  115. __asm movq [PIX+YSTRIDE],mm1 \
  116. __asm movq [PIX+YSTRIDE*2],mm2 \
  117. } \
  118. while(0)
  119. #define OC_LOOP_FILTER_H_MMX(_pix,_ystride,_ll) \
  120. do{ \
  121. /*Used local variable ll__ in order to fix compilation errors like: \
  122. "error C2443: operand size conflict".*/ \
  123. unsigned char *ll__; \
  124. unsigned char *pix__; \
  125. ll__=(_ll); \
  126. pix__=(_pix)-2; \
  127. __asm mov PIX,pix__ \
  128. __asm mov YSTRIDE,_ystride \
  129. __asm mov LL,ll__ \
  130. /*x x x x d0 c0 b0 a0*/ \
  131. __asm movd mm0,[PIX] \
  132. /*x x x x d1 c1 b1 a1*/ \
  133. __asm movd mm1,[PIX+YSTRIDE] \
  134. /*ystride3=_ystride*3*/ \
  135. __asm lea YSTRIDE3,[YSTRIDE+YSTRIDE*2] \
  136. /*x x x x d2 c2 b2 a2*/ \
  137. __asm movd mm2,[PIX+YSTRIDE*2] \
  138. /*x x x x d3 c3 b3 a3*/ \
  139. __asm lea D,[PIX+YSTRIDE*4] \
  140. __asm movd mm3,[PIX+YSTRIDE3] \
  141. /*x x x x d4 c4 b4 a4*/ \
  142. __asm movd mm4,[D] \
  143. /*x x x x d5 c5 b5 a5*/ \
  144. __asm movd mm5,[D+YSTRIDE] \
  145. /*x x x x d6 c6 b6 a6*/ \
  146. __asm movd mm6,[D+YSTRIDE*2] \
  147. /*x x x x d7 c7 b7 a7*/ \
  148. __asm movd mm7,[D+YSTRIDE3] \
  149. /*mm0=d1 d0 c1 c0 b1 b0 a1 a0*/ \
  150. __asm punpcklbw mm0,mm1 \
  151. /*mm2=d3 d2 c3 c2 b3 b2 a3 a2*/ \
  152. __asm punpcklbw mm2,mm3 \
  153. /*mm3=d1 d0 c1 c0 b1 b0 a1 a0*/ \
  154. __asm movq mm3,mm0 \
  155. /*mm0=b3 b2 b1 b0 a3 a2 a1 a0*/ \
  156. __asm punpcklwd mm0,mm2 \
  157. /*mm3=d3 d2 d1 d0 c3 c2 c1 c0*/ \
  158. __asm punpckhwd mm3,mm2 \
  159. /*mm1=b3 b2 b1 b0 a3 a2 a1 a0*/ \
  160. __asm movq mm1,mm0 \
  161. /*mm4=d5 d4 c5 c4 b5 b4 a5 a4*/ \
  162. __asm punpcklbw mm4,mm5 \
  163. /*mm6=d7 d6 c7 c6 b7 b6 a7 a6*/ \
  164. __asm punpcklbw mm6,mm7 \
  165. /*mm5=d5 d4 c5 c4 b5 b4 a5 a4*/ \
  166. __asm movq mm5,mm4 \
  167. /*mm4=b7 b6 b5 b4 a7 a6 a5 a4*/ \
  168. __asm punpcklwd mm4,mm6 \
  169. /*mm5=d7 d6 d5 d4 c7 c6 c5 c4*/ \
  170. __asm punpckhwd mm5,mm6 \
  171. /*mm2=d3 d2 d1 d0 c3 c2 c1 c0*/ \
  172. __asm movq mm2,mm3 \
  173. /*mm0=a7 a6 a5 a4 a3 a2 a1 a0*/ \
  174. __asm punpckldq mm0,mm4 \
  175. /*mm1=b7 b6 b5 b4 b3 b2 b1 b0*/ \
  176. __asm punpckhdq mm1,mm4 \
  177. /*mm2=c7 c6 c5 c4 c3 c2 c1 c0*/ \
  178. __asm punpckldq mm2,mm5 \
  179. /*mm3=d7 d6 d5 d4 d3 d2 d1 d0*/ \
  180. __asm punpckhdq mm3,mm5 \
  181. OC_LOOP_FILTER8_MMX \
  182. /*mm2={b0+R_0'',...,b7+R_7''}*/ \
  183. __asm movq mm0,mm1 \
  184. /*mm1={b0+R_0'',c0-R_0'',...,b3+R_3'',c3-R_3''}*/ \
  185. __asm punpcklbw mm1,mm2 \
  186. /*mm2={b4+R_4'',c4-R_4'',...,b7+R_7'',c7-R_7''}*/ \
  187. __asm punpckhbw mm0,mm2 \
  188. /*[d]=c1 b1 c0 b0*/ \
  189. __asm movd D,mm1 \
  190. __asm mov [PIX+1],D_WORD \
  191. __asm psrlq mm1,32 \
  192. __asm shr D,16 \
  193. __asm mov [PIX+YSTRIDE+1],D_WORD \
  194. /*[d]=c3 b3 c2 b2*/ \
  195. __asm movd D,mm1 \
  196. __asm mov [PIX+YSTRIDE*2+1],D_WORD \
  197. __asm shr D,16 \
  198. __asm mov [PIX+YSTRIDE3+1],D_WORD \
  199. __asm lea PIX,[PIX+YSTRIDE*4] \
  200. /*[d]=c5 b5 c4 b4*/ \
  201. __asm movd D,mm0 \
  202. __asm mov [PIX+1],D_WORD \
  203. __asm psrlq mm0,32 \
  204. __asm shr D,16 \
  205. __asm mov [PIX+YSTRIDE+1],D_WORD \
  206. /*[d]=c7 b7 c6 b6*/ \
  207. __asm movd D,mm0 \
  208. __asm mov [PIX+YSTRIDE*2+1],D_WORD \
  209. __asm shr D,16 \
  210. __asm mov [PIX+YSTRIDE3+1],D_WORD \
  211. } \
  212. while(0)
  213. # endif
  214. #endif