  1. /********************************************************************
  2. * *
  3. * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
  4. * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
  5. * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
  6. * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
  7. * *
  8. * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009 *
  9. * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
  10. * *
  11. ********************************************************************
  12. function:
  13. last mod: $Id: mmxfrag.c 16503 2009-08-22 18:14:02Z giles $
  14. ********************************************************************/
  15. /*MMX acceleration of fragment reconstruction for motion compensation.
  16. Originally written by Rudolf Marek.
  17. Additional optimization by Nils Pipenbrinck.
  18. Note: Loops are unrolled for best performance.
  19. The iteration each instruction belongs to is marked in the comments as #i.*/
  20. #include <stddef.h>
  21. #include "x86int.h"
  22. #include "mmxfrag.h"
  23. #if defined(OC_X86_ASM)
/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.
  The copy itself is implemented by the OC_FRAG_COPY_MMX macro (defined in
   mmxfrag.h, included above); this function is the out-of-line entry point.*/
void oc_frag_copy_mmx(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  OC_FRAG_COPY_MMX(_dst,_src,_ystride);
}
/*Reconstructs an intra-coded 8x8 fragment.
  Each of the 64 signed 16-bit residue values is biased by +128 (a saturating
   add of the constant 0x0080 built in mm0), packed with unsigned saturation
   to the byte range [0,255], and stored to _dst as 8 rows of 8 bytes.
  Rows are processed in unrolled groups of three/three/two; the #i tags in
   the comments mark which row each instruction belongs to.
  _dst:     Output pixel block; rows are _ystride bytes apart.
  _ystride: Bytes between successive output rows.
  _residue: The 64 16-bit residue values, in row-major order.*/
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  __asm__ __volatile__(
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    "pcmpeqw %%mm0,%%mm0\n\t"
    /*#0 Load low residue.*/
    "movq 0*8(%[residue]),%%mm1\n\t"
    /*#0 Load high residue.*/
    "movq 1*8(%[residue]),%%mm2\n\t"
    /*Set mm0 to 0x8000800080008000.*/
    "psllw $15,%%mm0\n\t"
    /*#1 Load low residue.*/
    "movq 2*8(%[residue]),%%mm3\n\t"
    /*#1 Load high residue.*/
    "movq 3*8(%[residue]),%%mm4\n\t"
    /*Set mm0 to 0x0080008000800080 (the +128 bias for every word).*/
    "psrlw $8,%%mm0\n\t"
    /*#2 Load low residue.*/
    "movq 4*8(%[residue]),%%mm5\n\t"
    /*#2 Load high residue.*/
    "movq 5*8(%[residue]),%%mm6\n\t"
    /*#0 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#0 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#0 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#1 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#1 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#1 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#2 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#2 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#2 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#0 Write row.*/
    "movq %%mm1,(%[dst])\n\t"
    /*#1 Write row.*/
    "movq %%mm3,(%[dst],%[ystride])\n\t"
    /*#2 Write row.*/
    "movq %%mm5,(%[dst],%[ystride],2)\n\t"
    /*#3 Load low residue.*/
    "movq 6*8(%[residue]),%%mm1\n\t"
    /*#3 Load high residue.*/
    "movq 7*8(%[residue]),%%mm2\n\t"
    /*#4 Load low residue.*/
    "movq 8*8(%[residue]),%%mm3\n\t"
    /*#4 Load high residue.*/
    "movq 9*8(%[residue]),%%mm4\n\t"
    /*#5 Load low residue.*/
    "movq 10*8(%[residue]),%%mm5\n\t"
    /*#5 Load high residue.*/
    "movq 11*8(%[residue]),%%mm6\n\t"
    /*#3 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#3 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#3 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#4 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#4 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#4 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#5 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#5 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#5 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#3 Write row.*/
    "movq %%mm1,(%[dst],%[ystride3])\n\t"
    /*#4 Write row.*/
    "movq %%mm3,(%[dst4])\n\t"
    /*#5 Write row.*/
    "movq %%mm5,(%[dst4],%[ystride])\n\t"
    /*#6 Load low residue.*/
    "movq 12*8(%[residue]),%%mm1\n\t"
    /*#6 Load high residue.*/
    "movq 13*8(%[residue]),%%mm2\n\t"
    /*#7 Load low residue.*/
    "movq 14*8(%[residue]),%%mm3\n\t"
    /*#7 Load high residue.*/
    "movq 15*8(%[residue]),%%mm4\n\t"
    /*#6 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#6 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#6 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#7 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#7 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#7 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#6 Write row.*/
    "movq %%mm1,(%[dst4],%[ystride],2)\n\t"
    /*#7 Write row.*/
    "movq %%mm3,(%[dst4],%[ystride3])\n\t"
    :
    /*Precomputed row addresses (dst+4*ystride, 3*ystride) let all eight rows
       be reached with base+index addressing and no extra instructions.*/
    :[residue]"r"(_residue),
     [dst]"r"(_dst),
     [dst4]"r"(_dst+(_ystride<<2)),
     [ystride]"r"((ptrdiff_t)_ystride),
     [ystride3]"r"((ptrdiff_t)_ystride*3)
    :"memory"
  );
}
/*Reconstructs an inter-coded 8x8 fragment: dst=clamp(src+residue).
  Source bytes are unpacked to 16-bit words (zero-extended against mm0),
   the signed 16-bit residue is added with saturation, and the sums are
   repacked with unsigned saturation to [0,255].
  Two rows are produced per loop iteration (four iterations total); the #i
   tags mark which of the two rows each instruction belongs to.
  _dst:     Output pixel block; rows are _ystride bytes apart.
  _src:     Predictor pixel block; rows are _ystride bytes apart.
  _ystride: Bytes between successive rows in both _dst and _src.
  _residue: The 64 16-bit residue values, in row-major order.*/
void oc_frag_recon_inter_mmx(unsigned char *_dst,const unsigned char *_src,
 int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0 (used below to zero-extend source bytes to words).*/
  __asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load source.*/
      "movq (%[src]),%%mm3\n\t"
      /*#1 Load source.*/
      "movq (%[src],%[ystride]),%%mm7\n\t"
      /*#0 Get copy of src.*/
      "movq %%mm3,%%mm4\n\t"
      /*#0 Expand high source.*/
      "punpckhbw %%mm0,%%mm4\n\t"
      /*#0 Expand low source.*/
      "punpcklbw %%mm0,%%mm3\n\t"
      /*#0 Add residue high.*/
      "paddsw 8(%[residue]),%%mm4\n\t"
      /*#1 Get copy of src.*/
      "movq %%mm7,%%mm2\n\t"
      /*#0 Add residue low.*/
      "paddsw (%[residue]), %%mm3\n\t"
      /*#1 Expand high source.*/
      "punpckhbw %%mm0,%%mm2\n\t"
      /*#0 Pack final row pixels.*/
      "packuswb %%mm4,%%mm3\n\t"
      /*#1 Expand low source.*/
      "punpcklbw %%mm0,%%mm7\n\t"
      /*#1 Add residue low.*/
      "paddsw 16(%[residue]),%%mm7\n\t"
      /*#1 Add residue high.*/
      "paddsw 24(%[residue]),%%mm2\n\t"
      /*Advance residue (two rows = 32 bytes).*/
      "lea 32(%[residue]),%[residue]\n\t"
      /*#1 Pack final row pixels.*/
      "packuswb %%mm2,%%mm7\n\t"
      /*Advance src by two rows.*/
      "lea (%[src],%[ystride],2),%[src]\n\t"
      /*#0 Write row.*/
      "movq %%mm3,(%[dst])\n\t"
      /*#1 Write row.*/
      "movq %%mm7,(%[dst],%[ystride])\n\t"
      /*Advance dst by two rows.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      /*The pointers are read-write ("+r"): each iteration leaves them
         advanced for the next.*/
      :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
/*Reconstructs a bi-predicted inter-coded 8x8 fragment:
   dst=clamp(((src1+src2)>>1)+residue).
  Both predictors are unpacked to 16-bit words (zero-extended against mm7),
   summed and averaged with an arithmetic shift (sums fit in 9 bits, so no
   overflow), then the residue is added with signed saturation and the
   result packed with unsigned saturation to [0,255].
  Two rows are produced per loop iteration (four iterations total); the #i
   tags mark which of the two rows each instruction belongs to.
  _dst:     Output pixel block; rows are _ystride bytes apart.
  _src1:    First predictor block; rows are _ystride bytes apart.
  _src2:    Second predictor block; rows are _ystride bytes apart.
  _ystride: Bytes between successive rows in all three blocks.
  _residue: The 64 16-bit residue values, in row-major order.*/
void oc_frag_recon_inter2_mmx(unsigned char *_dst,const unsigned char *_src1,
 const unsigned char *_src2,int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm7 (used below to zero-extend source bytes to words).*/
  __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load src1.*/
      "movq (%[src1]),%%mm0\n\t"
      /*#0 Load src2.*/
      "movq (%[src2]),%%mm2\n\t"
      /*#0 Copy src1.*/
      "movq %%mm0,%%mm1\n\t"
      /*#0 Copy src2.*/
      "movq %%mm2,%%mm3\n\t"
      /*#1 Load src1.*/
      "movq (%[src1],%[ystride]),%%mm4\n\t"
      /*#0 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm0\n\t"
      /*#1 Load src2.*/
      "movq (%[src2],%[ystride]),%%mm5\n\t"
      /*#0 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm1\n\t"
      /*#0 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm2\n\t"
      /*#0 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*Advance src1 ptr by two rows.*/
      "lea (%[src1],%[ystride],2),%[src1]\n\t"
      /*Advance src2 ptr by two rows.*/
      "lea (%[src2],%[ystride],2),%[src2]\n\t"
      /*#0 Lower src1+src2.*/
      "paddsw %%mm2,%%mm0\n\t"
      /*#0 Higher src1+src2.*/
      "paddsw %%mm3,%%mm1\n\t"
      /*#1 Copy src1.*/
      "movq %%mm4,%%mm2\n\t"
      /*#0 Build lo average.*/
      "psraw $1,%%mm0\n\t"
      /*#1 Copy src2.*/
      "movq %%mm5,%%mm3\n\t"
      /*#1 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm4\n\t"
      /*#0 Build hi average.*/
      "psraw $1,%%mm1\n\t"
      /*#1 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm2\n\t"
      /*#0 low+=residue.*/
      "paddsw (%[residue]),%%mm0\n\t"
      /*#1 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm5\n\t"
      /*#0 high+=residue.*/
      "paddsw 8(%[residue]),%%mm1\n\t"
      /*#1 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*#1 Lower src1+src2.*/
      "paddsw %%mm4,%%mm5\n\t"
      /*#0 Pack and saturate.*/
      "packuswb %%mm1,%%mm0\n\t"
      /*#1 Higher src1+src2.*/
      "paddsw %%mm2,%%mm3\n\t"
      /*#0 Write row.*/
      "movq %%mm0,(%[dst])\n\t"
      /*#1 Build lo average.*/
      "psraw $1,%%mm5\n\t"
      /*#1 Build hi average.*/
      "psraw $1,%%mm3\n\t"
      /*#1 low+=residue.*/
      "paddsw 16(%[residue]),%%mm5\n\t"
      /*#1 high+=residue.*/
      "paddsw 24(%[residue]),%%mm3\n\t"
      /*#1 Pack and saturate.*/
      "packuswb %%mm3,%%mm5\n\t"
      /*#1 Write row.*/
      "movq %%mm5,(%[dst],%[ystride])\n\t"
      /*Advance residue ptr (two rows = 32 bytes).*/
      "add $32,%[residue]\n\t"
      /*Advance dest ptr by two rows.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      /*All four pointers are read-write ("+r"); the "%" on src1 marks it
         commutative with src2, giving the allocator more freedom.*/
      :[dst]"+r"(_dst),[residue]"+r"(_residue),
       [src1]"+%r"(_src1),[src2]"+r"(_src2)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
/*Executes EMMS to clear the MMX state, making the x87 FPU register stack
   usable again after the MMX routines above have run.*/
void oc_restore_fpu_mmx(void){
  __asm__ __volatile__("emms\n\t");
}
  284. #endif