mmxfrag.c

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
 *                                                                  *
 ********************************************************************

  function:
    last mod: $Id$

 ********************************************************************/

/*MMX acceleration of fragment reconstruction for motion compensation.
  Originally written by Rudolf Marek.
  Additional optimization by Nils Pipenbrinck.
  Note: Loops are unrolled for best performance.
  The iteration each instruction belongs to is marked in the comments as #i.*/
#include <stddef.h>
#include "x86int.h"

#if defined(OC_X86_ASM)

/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.*/
# define OC_FRAG_COPY_MMX(_dst,_src,_ystride) \
  do{ \
    const unsigned char *src; \
    unsigned char       *dst; \
    src=(_src); \
    dst=(_dst); \
    __asm mov SRC,src \
    __asm mov DST,dst \
    __asm mov YSTRIDE,_ystride \
    /*src+0*ystride*/ \
    __asm movq mm0,[SRC] \
    /*src+1*ystride*/ \
    __asm movq mm1,[SRC+YSTRIDE] \
    /*ystride3=ystride*3*/ \
    __asm lea YSTRIDE3,[YSTRIDE+YSTRIDE*2] \
    /*src+2*ystride*/ \
    __asm movq mm2,[SRC+YSTRIDE*2] \
    /*src+3*ystride*/ \
    __asm movq mm3,[SRC+YSTRIDE3] \
    /*dst+0*ystride*/ \
    __asm movq [DST],mm0 \
    /*dst+1*ystride*/ \
    __asm movq [DST+YSTRIDE],mm1 \
    /*Pointer to next 4.*/ \
    __asm lea SRC,[SRC+YSTRIDE*4] \
    /*dst+2*ystride*/ \
    __asm movq [DST+YSTRIDE*2],mm2 \
    /*dst+3*ystride*/ \
    __asm movq [DST+YSTRIDE3],mm3 \
    /*Pointer to next 4.*/ \
    __asm lea DST,[DST+YSTRIDE*4] \
    /*src+0*ystride*/ \
    __asm movq mm0,[SRC] \
    /*src+1*ystride*/ \
    __asm movq mm1,[SRC+YSTRIDE] \
    /*src+2*ystride*/ \
    __asm movq mm2,[SRC+YSTRIDE*2] \
    /*src+3*ystride*/ \
    __asm movq mm3,[SRC+YSTRIDE3] \
    /*dst+0*ystride*/ \
    __asm movq [DST],mm0 \
    /*dst+1*ystride*/ \
    __asm movq [DST+YSTRIDE],mm1 \
    /*dst+2*ystride*/ \
    __asm movq [DST+YSTRIDE*2],mm2 \
    /*dst+3*ystride*/ \
    __asm movq [DST+YSTRIDE3],mm3 \
  } \
  while(0)

/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.*/
void oc_frag_copy_mmx(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
#define SRC edx
#define DST eax
#define YSTRIDE ecx
#define YSTRIDE3 esi
  OC_FRAG_COPY_MMX(_dst,_src,_ystride);
#undef SRC
#undef DST
#undef YSTRIDE
#undef YSTRIDE3
}
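/*For reference, a plain-C sketch of the operation OC_FRAG_COPY_MMX performs:
   copy one 8x8 block of bytes, one row of eight at a time.
  This helper is an illustrative addition to this listing, not part of the
   original file; the name oc_frag_copy_c_sketch is hypothetical.*/
static void oc_frag_copy_c_sketch(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  int i;
  int j;
  for(i=0;i<8;i++){
    /*Copy the eight pixels of this row, then step both pointers down one
       row.*/
    for(j=0;j<8;j++)_dst[j]=_src[j];
    _dst+=_ystride;
    _src+=_ystride;
  }
}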
/*Copies the fragments specified by the lists of fragment indices from one
   frame to another.
  _dst_frame:     The reference frame to copy to.
  _src_frame:     The reference frame to copy from.
  _ystride:       The row stride of the reference frames.
  _fragis:        A pointer to a list of fragment indices.
  _nfragis:       The number of fragment indices to copy.
  _frag_buf_offs: The offsets of fragments in the reference frames.*/
void oc_frag_copy_list_mmx(unsigned char *_dst_frame,
 const unsigned char *_src_frame,int _ystride,
 const ptrdiff_t *_fragis,ptrdiff_t _nfragis,const ptrdiff_t *_frag_buf_offs){
  ptrdiff_t fragii;
  for(fragii=0;fragii<_nfragis;fragii++){
    ptrdiff_t frag_buf_off;
    frag_buf_off=_frag_buf_offs[_fragis[fragii]];
#define SRC edx
#define DST eax
#define YSTRIDE ecx
#define YSTRIDE3 edi
    OC_FRAG_COPY_MMX(_dst_frame+frag_buf_off,
     _src_frame+frag_buf_off,_ystride);
#undef SRC
#undef DST
#undef YSTRIDE
#undef YSTRIDE3
  }
}
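/*Illustrative call shape for the routine above (a hypothetical example added
   to this listing; the variable names are placeholders, not libtheora API):
    oc_frag_copy_list_mmx(dst_frame,src_frame,ystride,
     coded_fragis,ncoded_fragis,frag_buf_offs);
  Each entry of coded_fragis selects an offset from frag_buf_offs, and the
   8x8 block at that offset is copied from src_frame to dst_frame.*/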
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  __asm{
#define DST edx
#define DST4 esi
#define YSTRIDE eax
#define YSTRIDE3 edi
#define RESIDUE ecx
    mov DST,_dst
    mov YSTRIDE,_ystride
    mov RESIDUE,_residue
    lea DST4,[DST+YSTRIDE*4]
    lea YSTRIDE3,[YSTRIDE+YSTRIDE*2]
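    /*Note (added to this listing): the next three constant-building
       instructions synthesize 0x0080 in every word lane of mm0 without a
       memory load: pcmpeqw gives all ones, psllw by 15 leaves only the sign
       bit, and psrlw by 8 moves it down to 0x0080.
      Adding this 128 bias re-centers the signed residue in the unsigned
       pixel range before packuswb clamps each value to [0,255].*/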
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    pcmpeqw mm0,mm0
    /*#0 Load low residue.*/
    movq mm1,[0*8+RESIDUE]
    /*#0 Load high residue.*/
    movq mm2,[1*8+RESIDUE]
    /*Set mm0 to 0x8000800080008000.*/
    psllw mm0,15
    /*#1 Load low residue.*/
    movq mm3,[2*8+RESIDUE]
    /*#1 Load high residue.*/
    movq mm4,[3*8+RESIDUE]
    /*Set mm0 to 0x0080008000800080.*/
    psrlw mm0,8
    /*#2 Load low residue.*/
    movq mm5,[4*8+RESIDUE]
    /*#2 Load high residue.*/
    movq mm6,[5*8+RESIDUE]
    /*#0 Bias low residue.*/
    paddsw mm1,mm0
    /*#0 Bias high residue.*/
    paddsw mm2,mm0
    /*#0 Pack to byte.*/
    packuswb mm1,mm2
    /*#1 Bias low residue.*/
    paddsw mm3,mm0
    /*#1 Bias high residue.*/
    paddsw mm4,mm0
    /*#1 Pack to byte.*/
    packuswb mm3,mm4
    /*#2 Bias low residue.*/
    paddsw mm5,mm0
    /*#2 Bias high residue.*/
    paddsw mm6,mm0
    /*#2 Pack to byte.*/
    packuswb mm5,mm6
    /*#0 Write row.*/
    movq [DST],mm1
    /*#1 Write row.*/
    movq [DST+YSTRIDE],mm3
    /*#2 Write row.*/
    movq [DST+YSTRIDE*2],mm5
    /*#3 Load low residue.*/
    movq mm1,[6*8+RESIDUE]
    /*#3 Load high residue.*/
    movq mm2,[7*8+RESIDUE]
    /*#4 Load low residue.*/
    movq mm3,[8*8+RESIDUE]
    /*#4 Load high residue.*/
    movq mm4,[9*8+RESIDUE]
    /*#5 Load low residue.*/
    movq mm5,[10*8+RESIDUE]
    /*#5 Load high residue.*/
    movq mm6,[11*8+RESIDUE]
    /*#3 Bias low residue.*/
    paddsw mm1,mm0
    /*#3 Bias high residue.*/
    paddsw mm2,mm0
    /*#3 Pack to byte.*/
    packuswb mm1,mm2
    /*#4 Bias low residue.*/
    paddsw mm3,mm0
    /*#4 Bias high residue.*/
    paddsw mm4,mm0
    /*#4 Pack to byte.*/
    packuswb mm3,mm4
    /*#5 Bias low residue.*/
    paddsw mm5,mm0
    /*#5 Bias high residue.*/
    paddsw mm6,mm0
    /*#5 Pack to byte.*/
    packuswb mm5,mm6
    /*#3 Write row.*/
    movq [DST+YSTRIDE3],mm1
    /*#4 Write row.*/
    movq [DST4],mm3
    /*#5 Write row.*/
    movq [DST4+YSTRIDE],mm5
    /*#6 Load low residue.*/
    movq mm1,[12*8+RESIDUE]
    /*#6 Load high residue.*/
    movq mm2,[13*8+RESIDUE]
    /*#7 Load low residue.*/
    movq mm3,[14*8+RESIDUE]
    /*#7 Load high residue.*/
    movq mm4,[15*8+RESIDUE]
    /*#6 Bias low residue.*/
    paddsw mm1,mm0
    /*#6 Bias high residue.*/
    paddsw mm2,mm0
    /*#6 Pack to byte.*/
    packuswb mm1,mm2
    /*#7 Bias low residue.*/
    paddsw mm3,mm0
    /*#7 Bias high residue.*/
    paddsw mm4,mm0
    /*#7 Pack to byte.*/
    packuswb mm3,mm4
    /*#6 Write row.*/
    movq [DST4+YSTRIDE*2],mm1
    /*#7 Write row.*/
    movq [DST4+YSTRIDE3],mm3
#undef DST
#undef DST4
#undef YSTRIDE
#undef YSTRIDE3
#undef RESIDUE
  }
}
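/*For reference, a plain-C sketch of what oc_frag_recon_intra_mmx computes:
   each output pixel is its residue value plus a bias of 128, clamped to
   [0,255].
  This helper is an illustrative addition to this listing, not part of the
   original file; the name oc_frag_recon_intra_c_sketch is hypothetical.*/
static void oc_frag_recon_intra_c_sketch(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Bias the signed residue back into the unsigned pixel range and
         clamp.*/
      p=_residue[i*8+j]+128;
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
  }
}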
void oc_frag_recon_inter_mmx(unsigned char *_dst,const unsigned char *_src,
 int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0.*/
  __asm pxor mm0,mm0;
  for(i=4;i-->0;){
    __asm{
#define DST edx
#define SRC ecx
#define YSTRIDE edi
#define RESIDUE eax
      mov DST,_dst
      mov SRC,_src
      mov YSTRIDE,_ystride
      mov RESIDUE,_residue
      /*#0 Load source.*/
      movq mm3,[SRC]
      /*#1 Load source.*/
      movq mm7,[SRC+YSTRIDE]
      /*#0 Get copy of src.*/
      movq mm4,mm3
      /*#0 Expand high source.*/
      punpckhbw mm4,mm0
      /*#0 Expand low source.*/
      punpcklbw mm3,mm0
      /*#0 Add residue high.*/
      paddsw mm4,[8+RESIDUE]
      /*#1 Get copy of src.*/
      movq mm2,mm7
      /*#0 Add residue low.*/
      paddsw mm3,[RESIDUE]
      /*#1 Expand high source.*/
      punpckhbw mm2,mm0
      /*#0 Pack final row pixels.*/
      packuswb mm3,mm4
      /*#1 Expand low source.*/
      punpcklbw mm7,mm0
      /*#1 Add residue low.*/
      paddsw mm7,[16+RESIDUE]
      /*#1 Add residue high.*/
      paddsw mm2,[24+RESIDUE]
      /*Advance residue.*/
      lea RESIDUE,[32+RESIDUE]
      /*#1 Pack final row pixels.*/
      packuswb mm7,mm2
      /*Advance src.*/
      lea SRC,[SRC+YSTRIDE*2]
      /*#0 Write row.*/
      movq [DST],mm3
      /*#1 Write row.*/
      movq [DST+YSTRIDE],mm7
      /*Advance dst.*/
      lea DST,[DST+YSTRIDE*2]
      mov _residue,RESIDUE
      mov _dst,DST
      mov _src,SRC
#undef DST
#undef SRC
#undef YSTRIDE
#undef RESIDUE
    }
  }
}
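/*For reference, a plain-C sketch of what oc_frag_recon_inter_mmx computes:
   each output pixel is the predictor pixel plus its residue, clamped to
   [0,255].
  This helper is an illustrative addition to this listing, not part of the
   original file; the name oc_frag_recon_inter_c_sketch is hypothetical.*/
static void oc_frag_recon_inter_c_sketch(unsigned char *_dst,
 const unsigned char *_src,int _ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Add the residue to the prediction and clamp.*/
      p=_src[j]+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
    _src+=_ystride;
  }
}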
void oc_frag_recon_inter2_mmx(unsigned char *_dst,const unsigned char *_src1,
 const unsigned char *_src2,int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm7.*/
  __asm pxor mm7,mm7;
  for(i=4;i-->0;){
    __asm{
#define SRC1 ecx
#define SRC2 edi
#define YSTRIDE esi
#define RESIDUE edx
#define DST eax
      mov YSTRIDE,_ystride
      mov DST,_dst
      mov RESIDUE,_residue
      mov SRC1,_src1
      mov SRC2,_src2
      /*#0 Load src1.*/
      movq mm0,[SRC1]
      /*#0 Load src2.*/
      movq mm2,[SRC2]
      /*#0 Copy src1.*/
      movq mm1,mm0
      /*#0 Copy src2.*/
      movq mm3,mm2
      /*#1 Load src1.*/
      movq mm4,[SRC1+YSTRIDE]
      /*#0 Unpack lower src1.*/
      punpcklbw mm0,mm7
      /*#1 Load src2.*/
      movq mm5,[SRC2+YSTRIDE]
      /*#0 Unpack higher src1.*/
      punpckhbw mm1,mm7
      /*#0 Unpack lower src2.*/
      punpcklbw mm2,mm7
      /*#0 Unpack higher src2.*/
      punpckhbw mm3,mm7
      /*Advance src1 ptr.*/
      lea SRC1,[SRC1+YSTRIDE*2]
      /*Advance src2 ptr.*/
      lea SRC2,[SRC2+YSTRIDE*2]
      /*#0 Lower src1+src2.*/
      paddsw mm0,mm2
      /*#0 Higher src1+src2.*/
      paddsw mm1,mm3
      /*#1 Copy src1.*/
      movq mm2,mm4
      /*#0 Build lo average.*/
      psraw mm0,1
      /*#1 Copy src2.*/
      movq mm3,mm5
      /*#1 Unpack lower src1.*/
      punpcklbw mm4,mm7
      /*#0 Build hi average.*/
      psraw mm1,1
      /*#1 Unpack higher src1.*/
      punpckhbw mm2,mm7
      /*#0 low+=residue.*/
      paddsw mm0,[RESIDUE]
      /*#1 Unpack lower src2.*/
      punpcklbw mm5,mm7
      /*#0 high+=residue.*/
      paddsw mm1,[8+RESIDUE]
      /*#1 Unpack higher src2.*/
      punpckhbw mm3,mm7
      /*#1 Lower src1+src2.*/
      paddsw mm5,mm4
      /*#0 Pack and saturate.*/
      packuswb mm0,mm1
      /*#1 Higher src1+src2.*/
      paddsw mm3,mm2
      /*#0 Write row.*/
      movq [DST],mm0
      /*#1 Build lo average.*/
      psraw mm5,1
      /*#1 Build hi average.*/
      psraw mm3,1
      /*#1 low+=residue.*/
      paddsw mm5,[16+RESIDUE]
      /*#1 high+=residue.*/
      paddsw mm3,[24+RESIDUE]
      /*#1 Pack and saturate.*/
      packuswb mm5,mm3
      /*#1 Write row.*/
      movq [DST+YSTRIDE],mm5
      /*Advance residue ptr.*/
      add RESIDUE,32
      /*Advance dest ptr.*/
      lea DST,[DST+YSTRIDE*2]
      mov _dst,DST
      mov _residue,RESIDUE
      mov _src1,SRC1
      mov _src2,SRC2
#undef SRC1
#undef SRC2
#undef YSTRIDE
#undef RESIDUE
#undef DST
    }
  }
}
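/*For reference, a plain-C sketch of what oc_frag_recon_inter2_mmx computes:
   each output pixel is the truncated average of the two predictors plus the
   residue, clamped to [0,255].
  This helper is an illustrative addition to this listing, not part of the
   original file; the name oc_frag_recon_inter2_c_sketch is hypothetical.*/
static void oc_frag_recon_inter2_c_sketch(unsigned char *_dst,
 const unsigned char *_src1,const unsigned char *_src2,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      /*Average the two predictors, add the residue, and clamp.*/
      p=((_src1[j]+_src2[j])>>1)+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
    _src1+=_ystride;
    _src2+=_ystride;
  }
}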
void oc_restore_fpu_mmx(void){
  __asm emms;
}
#endif