/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 1999-2006                *
 * by the Xiph.Org Foundation http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************/
/*MMX fDCT implementation for x86_32*/
/*$Id: fdct_ses2.c 14579 2008-03-12 06:42:40Z xiphmont $*/
#include "x86enc.h"
#include "x86zigzag.h"
#if defined(OC_X86_ASM)
  17. #define OC_FDCT_STAGE1_8x4 __asm{ \
  18. /*Stage 1:*/ \
  19. /*mm0=t7'=t0-t7*/ \
  20. __asm psubw mm0,mm7 \
  21. __asm paddw mm7,mm7 \
  22. /*mm1=t6'=t1-t6*/ \
  23. __asm psubw mm1, mm6 \
  24. __asm paddw mm6,mm6 \
  25. /*mm2=t5'=t2-t5*/ \
  26. __asm psubw mm2,mm5 \
  27. __asm paddw mm5,mm5 \
  28. /*mm3=t4'=t3-t4*/ \
  29. __asm psubw mm3,mm4 \
  30. __asm paddw mm4,mm4 \
  31. /*mm7=t0'=t0+t7*/ \
  32. __asm paddw mm7,mm0 \
  33. /*mm6=t1'=t1+t6*/ \
  34. __asm paddw mm6,mm1 \
  35. /*mm5=t2'=t2+t5*/ \
  36. __asm paddw mm5,mm2 \
  37. /*mm4=t3'=t3+t4*/ \
  38. __asm paddw mm4,mm3\
  39. }
  40. #define OC_FDCT8x4(_r0,_r1,_r2,_r3,_r4,_r5,_r6,_r7) __asm{ \
  41. /*Stage 2:*/ \
  42. /*mm7=t3''=t0'-t3'*/ \
  43. __asm psubw mm7,mm4 \
  44. __asm paddw mm4,mm4 \
  45. /*mm6=t2''=t1'-t2'*/ \
  46. __asm psubw mm6,mm5 \
  47. __asm movq [Y+_r6],mm7 \
  48. __asm paddw mm5,mm5 \
  49. /*mm1=t5''=t6'-t5'*/ \
  50. __asm psubw mm1,mm2 \
  51. __asm movq [Y+_r2],mm6 \
  52. /*mm4=t0''=t0'+t3'*/ \
  53. __asm paddw mm4,mm7 \
  54. __asm paddw mm2,mm2 \
  55. /*mm5=t1''=t1'+t2'*/ \
  56. __asm movq [Y+_r0],mm4 \
  57. __asm paddw mm5,mm6 \
  58. /*mm2=t6''=t6'+t5'*/ \
  59. __asm paddw mm2,mm1 \
  60. __asm movq [Y+_r4],mm5 \
  61. /*mm0=t7', mm1=t5'', mm2=t6'', mm3=t4'.*/ \
  62. /*mm4, mm5, mm6, mm7 are free.*/ \
  63. /*Stage 3:*/ \
  64. /*mm6={2}x4, mm7={27146,0xB500>>1}x2*/ \
  65. __asm mov A,0x5A806A0A \
  66. __asm pcmpeqb mm6,mm6 \
  67. __asm movd mm7,A \
  68. __asm psrlw mm6,15 \
  69. __asm punpckldq mm7,mm7 \
  70. __asm paddw mm6,mm6 \
  71. /*mm0=0, m2={-1}x4 \
  72. mm5:mm4=t5''*27146+0xB500*/ \
  73. __asm movq mm4,mm1 \
  74. __asm movq mm5,mm1 \
  75. __asm punpcklwd mm4,mm6 \
  76. __asm movq [Y+_r3],mm2 \
  77. __asm pmaddwd mm4,mm7 \
  78. __asm movq [Y+_r7],mm0 \
  79. __asm punpckhwd mm5,mm6 \
  80. __asm pxor mm0,mm0 \
  81. __asm pmaddwd mm5,mm7 \
  82. __asm pcmpeqb mm2,mm2 \
  83. /*mm2=t6'', mm1=t5''+(t5''!=0) \
  84. mm4=(t5''*27146+0xB500>>16)*/ \
  85. __asm pcmpeqw mm0,mm1 \
  86. __asm psrad mm4,16 \
  87. __asm psubw mm0,mm2 \
  88. __asm movq mm2, [Y+_r3] \
  89. __asm psrad mm5,16 \
  90. __asm paddw mm1,mm0 \
  91. __asm packssdw mm4,mm5 \
  92. /*mm4=s=(t5''*27146+0xB500>>16)+t5''+(t5''!=0)>>1*/ \
  93. __asm paddw mm4,mm1 \
  94. __asm movq mm0, [Y+_r7] \
  95. __asm psraw mm4,1 \
  96. __asm movq mm1,mm3 \
  97. /*mm3=t4''=t4'+s*/ \
  98. __asm paddw mm3,mm4 \
  99. /*mm1=t5'''=t4'-s*/ \
  100. __asm psubw mm1,mm4 \
  101. /*mm1=0, mm3={-1}x4 \
  102. mm5:mm4=t6''*27146+0xB500*/ \
  103. __asm movq mm4,mm2 \
  104. __asm movq mm5,mm2 \
  105. __asm punpcklwd mm4,mm6 \
  106. __asm movq [Y+_r5],mm1 \
  107. __asm pmaddwd mm4,mm7 \
  108. __asm movq [Y+_r1],mm3 \
  109. __asm punpckhwd mm5,mm6 \
  110. __asm pxor mm1,mm1 \
  111. __asm pmaddwd mm5,mm7 \
  112. __asm pcmpeqb mm3,mm3 \
  113. /*mm2=t6''+(t6''!=0), mm4=(t6''*27146+0xB500>>16)*/ \
  114. __asm psrad mm4,16 \
  115. __asm pcmpeqw mm1,mm2 \
  116. __asm psrad mm5,16 \
  117. __asm psubw mm1,mm3 \
  118. __asm packssdw mm4,mm5 \
  119. __asm paddw mm2,mm1 \
  120. /*mm1=t1'' \
  121. mm4=s=(t6''*27146+0xB500>>16)+t6''+(t6''!=0)>>1*/ \
  122. __asm paddw mm4,mm2 \
  123. __asm movq mm1,[Y+_r4] \
  124. __asm psraw mm4,1 \
  125. __asm movq mm2,mm0 \
  126. /*mm7={54491-0x7FFF,0x7FFF}x2 \
  127. mm0=t7''=t7'+s*/ \
  128. __asm paddw mm0,mm4 \
  129. /*mm2=t6'''=t7'-s*/ \
  130. __asm psubw mm2,mm4 \
  131. /*Stage 4:*/ \
  132. /*mm0=0, mm2=t0'' \
  133. mm5:mm4=t1''*27146+0xB500*/ \
  134. __asm movq mm4,mm1 \
  135. __asm movq mm5,mm1 \
  136. __asm punpcklwd mm4,mm6 \
  137. __asm movq [Y+_r3],mm2 \
  138. __asm pmaddwd mm4,mm7 \
  139. __asm movq mm2,[Y+_r0] \
  140. __asm punpckhwd mm5,mm6 \
  141. __asm movq [Y+_r7],mm0 \
  142. __asm pmaddwd mm5,mm7 \
  143. __asm pxor mm0,mm0 \
  144. /*mm7={27146,0x4000>>1}x2 \
  145. mm0=s=(t1''*27146+0xB500>>16)+t1''+(t1''!=0)*/ \
  146. __asm psrad mm4,16 \
  147. __asm mov A,0x20006A0A \
  148. __asm pcmpeqw mm0,mm1 \
  149. __asm movd mm7,A \
  150. __asm psrad mm5,16 \
  151. __asm psubw mm0,mm3 \
  152. __asm packssdw mm4,mm5 \
  153. __asm paddw mm0,mm1 \
  154. __asm punpckldq mm7,mm7 \
  155. __asm paddw mm0,mm4 \
  156. /*mm6={0x00000E3D}x2 \
  157. mm1=-(t0''==0), mm5:mm4=t0''*27146+0x4000*/ \
  158. __asm movq mm4,mm2 \
  159. __asm movq mm5,mm2 \
  160. __asm punpcklwd mm4,mm6 \
  161. __asm mov A,0x0E3D \
  162. __asm pmaddwd mm4,mm7 \
  163. __asm punpckhwd mm5,mm6 \
  164. __asm movd mm6,A \
  165. __asm pmaddwd mm5,mm7 \
  166. __asm pxor mm1,mm1 \
  167. __asm punpckldq mm6,mm6 \
  168. __asm pcmpeqw mm1,mm2 \
  169. /*mm4=r=(t0''*27146+0x4000>>16)+t0''+(t0''!=0)*/ \
  170. __asm psrad mm4,16 \
  171. __asm psubw mm1,mm3 \
  172. __asm psrad mm5,16 \
  173. __asm paddw mm2,mm1 \
  174. __asm packssdw mm4,mm5 \
  175. __asm movq mm1,[Y+_r5] \
  176. __asm paddw mm4,mm2 \
  177. /*mm2=t6'', mm0=_y[0]=u=r+s>>1 \
  178. The naive implementation could cause overflow, so we use \
  179. u=(r&s)+((r^s)>>1).*/ \
  180. __asm movq mm2,[Y+_r3] \
  181. __asm movq mm7,mm0 \
  182. __asm pxor mm0,mm4 \
  183. __asm pand mm7,mm4 \
  184. __asm psraw mm0,1 \
  185. __asm mov A,0x7FFF54DC \
  186. __asm paddw mm0,mm7 \
  187. __asm movd mm7,A \
  188. /*mm7={54491-0x7FFF,0x7FFF}x2 \
  189. mm4=_y[4]=v=r-u*/ \
  190. __asm psubw mm4,mm0 \
  191. __asm punpckldq mm7,mm7 \
  192. __asm movq [Y+_r4],mm4 \
  193. /*mm0=0, mm7={36410}x4 \
  194. mm1=(t5'''!=0), mm5:mm4=54491*t5'''+0x0E3D*/ \
  195. __asm movq mm4,mm1 \
  196. __asm movq mm5,mm1 \
  197. __asm punpcklwd mm4,mm1 \
  198. __asm mov A,0x8E3A8E3A \
  199. __asm pmaddwd mm4,mm7 \
  200. __asm movq [Y+_r0],mm0 \
  201. __asm punpckhwd mm5,mm1 \
  202. __asm pxor mm0,mm0 \
  203. __asm pmaddwd mm5,mm7 \
  204. __asm pcmpeqw mm1,mm0 \
  205. __asm movd mm7,A \
  206. __asm psubw mm1,mm3 \
  207. __asm punpckldq mm7,mm7 \
  208. __asm paddd mm4,mm6 \
  209. __asm paddd mm5,mm6 \
  210. /*mm0=0 \
  211. mm3:mm1=36410*t6'''+((t5'''!=0)<<16)*/ \
  212. __asm movq mm6,mm2 \
  213. __asm movq mm3,mm2 \
  214. __asm pmulhw mm6,mm7 \
  215. __asm paddw mm1,mm2 \
  216. __asm pmullw mm3,mm7 \
  217. __asm pxor mm0,mm0 \
  218. __asm paddw mm6,mm1 \
  219. __asm movq mm1,mm3 \
  220. __asm punpckhwd mm3,mm6 \
  221. __asm punpcklwd mm1,mm6 \
  222. /*mm3={-1}x4, mm6={1}x4 \
  223. mm4=_y[5]=u=(54491*t5'''+36410*t6'''+0x0E3D>>16)+(t5'''!=0)*/ \
  224. __asm paddd mm5,mm3 \
  225. __asm paddd mm4,mm1 \
  226. __asm psrad mm5,16 \
  227. __asm pxor mm6,mm6 \
  228. __asm psrad mm4,16 \
  229. __asm pcmpeqb mm3,mm3 \
  230. __asm packssdw mm4,mm5 \
  231. __asm psubw mm6,mm3 \
  232. /*mm1=t7'', mm7={26568,0x3400}x2 \
  233. mm2=s=t6'''-(36410*u>>16)*/ \
  234. __asm movq mm1,mm4 \
  235. __asm mov A,0x340067C8 \
  236. __asm pmulhw mm4,mm7 \
  237. __asm movd mm7,A \
  238. __asm movq [Y+_r5],mm1 \
  239. __asm punpckldq mm7,mm7 \
  240. __asm paddw mm4,mm1 \
  241. __asm movq mm1,[Y+_r7] \
  242. __asm psubw mm2,mm4 \
  243. /*mm6={0x00007B1B}x2 \
  244. mm0=(s!=0), mm5:mm4=s*26568+0x3400*/ \
  245. __asm movq mm4,mm2 \
  246. __asm movq mm5,mm2 \
  247. __asm punpcklwd mm4,mm6 \
  248. __asm pcmpeqw mm0,mm2 \
  249. __asm pmaddwd mm4,mm7 \
  250. __asm mov A,0x7B1B \
  251. __asm punpckhwd mm5,mm6 \
  252. __asm movd mm6,A \
  253. __asm pmaddwd mm5,mm7 \
  254. __asm psubw mm0,mm3 \
  255. __asm punpckldq mm6,mm6 \
  256. /*mm7={64277-0x7FFF,0x7FFF}x2 \
  257. mm2=_y[3]=v=(s*26568+0x3400>>17)+s+(s!=0)*/ \
  258. __asm psrad mm4,17 \
  259. __asm paddw mm2,mm0 \
  260. __asm psrad mm5,17 \
  261. __asm mov A,0x7FFF7B16 \
  262. __asm packssdw mm4,mm5 \
  263. __asm movd mm7,A \
  264. __asm paddw mm2,mm4 \
  265. __asm punpckldq mm7,mm7 \
  266. /*mm0=0, mm7={12785}x4 \
  267. mm1=(t7''!=0), mm2=t4'', mm5:mm4=64277*t7''+0x7B1B*/ \
  268. __asm movq mm4,mm1 \
  269. __asm movq mm5,mm1 \
  270. __asm movq [Y+_r3],mm2 \
  271. __asm punpcklwd mm4,mm1 \
  272. __asm movq mm2,[Y+_r1] \
  273. __asm pmaddwd mm4,mm7 \
  274. __asm mov A,0x31F131F1 \
  275. __asm punpckhwd mm5,mm1 \
  276. __asm pxor mm0,mm0 \
  277. __asm pmaddwd mm5,mm7 \
  278. __asm pcmpeqw mm1,mm0 \
  279. __asm movd mm7,A \
  280. __asm psubw mm1,mm3 \
  281. __asm punpckldq mm7,mm7 \
  282. __asm paddd mm4,mm6 \
  283. __asm paddd mm5,mm6 \
  284. /*mm3:mm1=12785*t4'''+((t7''!=0)<<16)*/ \
  285. __asm movq mm6,mm2 \
  286. __asm movq mm3,mm2 \
  287. __asm pmulhw mm6,mm7 \
  288. __asm pmullw mm3,mm7 \
  289. __asm paddw mm6,mm1 \
  290. __asm movq mm1,mm3 \
  291. __asm punpckhwd mm3,mm6 \
  292. __asm punpcklwd mm1,mm6 \
  293. /*mm3={-1}x4, mm6={1}x4 \
  294. mm4=_y[1]=u=(12785*t4'''+64277*t7''+0x7B1B>>16)+(t7''!=0)*/ \
  295. __asm paddd mm5,mm3 \
  296. __asm paddd mm4,mm1 \
  297. __asm psrad mm5,16 \
  298. __asm pxor mm6,mm6 \
  299. __asm psrad mm4,16 \
  300. __asm pcmpeqb mm3,mm3 \
  301. __asm packssdw mm4,mm5 \
  302. __asm psubw mm6,mm3 \
  303. /*mm1=t3'', mm7={20539,0x3000}x2 \
  304. mm4=s=(12785*u>>16)-t4''*/ \
  305. __asm movq [Y+_r1],mm4 \
  306. __asm pmulhw mm4,mm7 \
  307. __asm mov A,0x3000503B \
  308. __asm movq mm1,[Y+_r6] \
  309. __asm movd mm7,A \
  310. __asm psubw mm4,mm2 \
  311. __asm punpckldq mm7,mm7 \
  312. /*mm6={0x00006CB7}x2 \
  313. mm0=(s!=0), mm5:mm4=s*20539+0x3000*/ \
  314. __asm movq mm5,mm4 \
  315. __asm movq mm2,mm4 \
  316. __asm punpcklwd mm4,mm6 \
  317. __asm pcmpeqw mm0,mm2 \
  318. __asm pmaddwd mm4,mm7 \
  319. __asm mov A,0x6CB7 \
  320. __asm punpckhwd mm5,mm6 \
  321. __asm movd mm6,A \
  322. __asm pmaddwd mm5,mm7 \
  323. __asm psubw mm0,mm3 \
  324. __asm punpckldq mm6,mm6 \
  325. /*mm7={60547-0x7FFF,0x7FFF}x2 \
  326. mm2=_y[7]=v=(s*20539+0x3000>>20)+s+(s!=0)*/ \
  327. __asm psrad mm4,20 \
  328. __asm paddw mm2,mm0 \
  329. __asm psrad mm5,20 \
  330. __asm mov A,0x7FFF6C84 \
  331. __asm packssdw mm4,mm5 \
  332. __asm movd mm7,A \
  333. __asm paddw mm2,mm4 \
  334. __asm punpckldq mm7,mm7 \
  335. /*mm0=0, mm7={25080}x4 \
  336. mm2=t2'', mm5:mm4=60547*t3''+0x6CB7*/ \
  337. __asm movq mm4,mm1 \
  338. __asm movq mm5,mm1 \
  339. __asm movq [Y+_r7],mm2 \
  340. __asm punpcklwd mm4,mm1 \
  341. __asm movq mm2,[Y+_r2] \
  342. __asm pmaddwd mm4,mm7 \
  343. __asm mov A,0x61F861F8 \
  344. __asm punpckhwd mm5,mm1 \
  345. __asm pxor mm0,mm0 \
  346. __asm pmaddwd mm5,mm7 \
  347. __asm movd mm7,A \
  348. __asm pcmpeqw mm1,mm0 \
  349. __asm psubw mm1,mm3 \
  350. __asm punpckldq mm7,mm7 \
  351. __asm paddd mm4,mm6 \
  352. __asm paddd mm5,mm6 \
  353. /*mm3:mm1=25080*t2''+((t3''!=0)<<16)*/ \
  354. __asm movq mm6,mm2 \
  355. __asm movq mm3,mm2 \
  356. __asm pmulhw mm6,mm7 \
  357. __asm pmullw mm3,mm7 \
  358. __asm paddw mm6,mm1 \
  359. __asm movq mm1,mm3 \
  360. __asm punpckhwd mm3,mm6 \
  361. __asm punpcklwd mm1,mm6 \
  362. /*mm1={-1}x4 \
  363. mm4=u=(25080*t2''+60547*t3''+0x6CB7>>16)+(t3''!=0)*/ \
  364. __asm paddd mm5,mm3 \
  365. __asm paddd mm4,mm1 \
  366. __asm psrad mm5,16 \
  367. __asm mov A,0x28005460 \
  368. __asm psrad mm4,16 \
  369. __asm pcmpeqb mm1,mm1 \
  370. __asm packssdw mm4,mm5 \
  371. /*mm5={1}x4, mm6=_y[2]=u, mm7={21600,0x2800}x2 \
  372. mm4=s=(25080*u>>16)-t2''*/ \
  373. __asm movq mm6,mm4 \
  374. __asm pmulhw mm4,mm7 \
  375. __asm pxor mm5,mm5 \
  376. __asm movd mm7,A \
  377. __asm psubw mm5,mm1 \
  378. __asm punpckldq mm7,mm7 \
  379. __asm psubw mm4,mm2 \
  380. /*mm2=s+(s!=0) \
  381. mm4:mm3=s*21600+0x2800*/ \
  382. __asm movq mm3,mm4 \
  383. __asm movq mm2,mm4 \
  384. __asm punpckhwd mm4,mm5 \
  385. __asm pcmpeqw mm0,mm2 \
  386. __asm pmaddwd mm4,mm7 \
  387. __asm psubw mm0,mm1 \
  388. __asm punpcklwd mm3,mm5 \
  389. __asm paddw mm2,mm0 \
  390. __asm pmaddwd mm3,mm7 \
  391. /*mm0=_y[4], mm1=_y[7], mm4=_y[0], mm5=_y[5] \
  392. mm3=_y[6]=v=(s*21600+0x2800>>18)+s+(s!=0)*/ \
  393. __asm movq mm0,[Y+_r4] \
  394. __asm psrad mm4,18 \
  395. __asm movq mm5,[Y+_r5] \
  396. __asm psrad mm3,18 \
  397. __asm movq mm1,[Y+_r7] \
  398. __asm packssdw mm3,mm4 \
  399. __asm movq mm4,[Y+_r0] \
  400. __asm paddw mm3,mm2 \
  401. }
  402. /*On input, mm4=_y[0], mm6=_y[2], mm0=_y[4], mm5=_y[5], mm3=_y[6], mm1=_y[7].
  403. On output, {_y[4],mm1,mm2,mm3} contains the transpose of _y[4...7] and
  404. {mm4,mm5,mm6,mm7} contains the transpose of _y[0...3].*/
  405. #define OC_TRANSPOSE8x4(_r0,_r1,_r2,_r3,_r4,_r5,_r6,_r7) __asm{ \
  406. /*First 4x4 transpose:*/ \
  407. /*mm0 = e3 e2 e1 e0 \
  408. mm5 = f3 f2 f1 f0 \
  409. mm3 = g3 g2 g1 g0 \
  410. mm1 = h3 h2 h1 h0*/ \
  411. __asm movq mm2,mm0 \
  412. __asm punpcklwd mm0,mm5 \
  413. __asm punpckhwd mm2,mm5 \
  414. __asm movq mm5,mm3 \
  415. __asm punpcklwd mm3,mm1 \
  416. __asm punpckhwd mm5,mm1 \
  417. /*mm0 = f1 e1 f0 e0 \
  418. mm2 = f3 e3 f2 e2 \
  419. mm3 = h1 g1 h0 g0 \
  420. mm5 = h3 g3 h2 g2*/ \
  421. __asm movq mm1,mm0 \
  422. __asm punpckldq mm0,mm3 \
  423. __asm movq [Y+_r4],mm0 \
  424. __asm punpckhdq mm1,mm3 \
  425. __asm movq mm0,[Y+_r1] \
  426. __asm movq mm3,mm2 \
  427. __asm punpckldq mm2,mm5 \
  428. __asm punpckhdq mm3,mm5 \
  429. __asm movq mm5,[Y+_r3] \
  430. /*_y[4] = h0 g0 f0 e0 \
  431. mm1 = h1 g1 f1 e1 \
  432. mm2 = h2 g2 f2 e2 \
  433. mm3 = h3 g3 f3 e3*/ \
  434. /*Second 4x4 transpose:*/ \
  435. /*mm4 = a3 a2 a1 a0 \
  436. mm0 = b3 b2 b1 b0 \
  437. mm6 = c3 c2 c1 c0 \
  438. mm5 = d3 d2 d1 d0*/ \
  439. __asm movq mm7,mm4 \
  440. __asm punpcklwd mm4,mm0 \
  441. __asm punpckhwd mm7,mm0 \
  442. __asm movq mm0,mm6 \
  443. __asm punpcklwd mm6,mm5 \
  444. __asm punpckhwd mm0,mm5 \
  445. /*mm4 = b1 a1 b0 a0 \
  446. mm7 = b3 a3 b2 a2 \
  447. mm6 = d1 c1 d0 c0 \
  448. mm0 = d3 c3 d2 c2*/ \
  449. __asm movq mm5,mm4 \
  450. __asm punpckldq mm4,mm6 \
  451. __asm punpckhdq mm5,mm6 \
  452. __asm movq mm6,mm7 \
  453. __asm punpckhdq mm7,mm0 \
  454. __asm punpckldq mm6,mm0 \
  455. /*mm4 = d0 c0 b0 a0 \
  456. mm5 = d1 c1 b1 a1 \
  457. mm6 = d2 c2 b2 a2 \
  458. mm7 = d3 c3 b3 a3*/ \
  459. }
  460. /*MMX implementation of the fDCT.*/
  461. void oc_enc_fdct8x8_mmxext(ogg_int16_t _y[64],const ogg_int16_t _x[64]){
  462. OC_ALIGN8(ogg_int16_t buf[64]);
  463. ogg_int16_t *bufp;
  464. bufp=buf;
  465. __asm{
  466. #define X edx
  467. #define Y eax
  468. #define A ecx
  469. #define BUF esi
  470. /*Add two extra bits of working precision to improve accuracy; any more and
  471. we could overflow.*/
  472. /*We also add biases to correct for some systematic error that remains in
  473. the full fDCT->iDCT round trip.*/
  474. mov X, _x
  475. mov Y, _y
  476. mov BUF, bufp
  477. movq mm0,[0x00+X]
  478. movq mm1,[0x10+X]
  479. movq mm2,[0x20+X]
  480. movq mm3,[0x30+X]
  481. pcmpeqb mm4,mm4
  482. pxor mm7,mm7
  483. movq mm5,mm0
  484. psllw mm0,2
  485. pcmpeqw mm5,mm7
  486. movq mm7,[0x70+X]
  487. psllw mm1,2
  488. psubw mm5,mm4
  489. psllw mm2,2
  490. mov A,1
  491. pslld mm5,16
  492. movd mm6,A
  493. psllq mm5,16
  494. mov A,0x10001
  495. psllw mm3,2
  496. movd mm4,A
  497. punpckhwd mm5,mm6
  498. psubw mm1,mm6
  499. movq mm6,[0x60+X]
  500. paddw mm0,mm5
  501. movq mm5,[0x50+X]
  502. paddw mm0,mm4
  503. movq mm4,[0x40+X]
  504. /*We inline stage1 of the transform here so we can get better instruction
  505. scheduling with the shifts.*/
  506. /*mm0=t7'=t0-t7*/
  507. psllw mm7,2
  508. psubw mm0,mm7
  509. psllw mm6,2
  510. paddw mm7,mm7
  511. /*mm1=t6'=t1-t6*/
  512. psllw mm5,2
  513. psubw mm1,mm6
  514. psllw mm4,2
  515. paddw mm6,mm6
  516. /*mm2=t5'=t2-t5*/
  517. psubw mm2,mm5
  518. paddw mm5,mm5
  519. /*mm3=t4'=t3-t4*/
  520. psubw mm3,mm4
  521. paddw mm4,mm4
  522. /*mm7=t0'=t0+t7*/
  523. paddw mm7,mm0
  524. /*mm6=t1'=t1+t6*/
  525. paddw mm6,mm1
  526. /*mm5=t2'=t2+t5*/
  527. paddw mm5,mm2
  528. /*mm4=t3'=t3+t4*/
  529. paddw mm4,mm3
  530. OC_FDCT8x4(0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70)
  531. OC_TRANSPOSE8x4(0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70)
  532. /*Swap out this 8x4 block for the next one.*/
  533. movq mm0,[0x08+X]
  534. movq [0x30+Y],mm7
  535. movq mm7,[0x78+X]
  536. movq [0x50+Y],mm1
  537. movq mm1,[0x18+X]
  538. movq [0x20+Y],mm6
  539. movq mm6,[0x68+X]
  540. movq [0x60+Y],mm2
  541. movq mm2,[0x28+X]
  542. movq [0x10+Y],mm5
  543. movq mm5,[0x58+X]
  544. movq [0x70+Y],mm3
  545. movq mm3,[0x38+X]
  546. /*And increase its working precision, too.*/
  547. psllw mm0,2
  548. movq [0x00+Y],mm4
  549. psllw mm7,2
  550. movq mm4,[0x48+X]
  551. /*We inline stage1 of the transform here so we can get better instruction
  552. scheduling with the shifts.*/
  553. /*mm0=t7'=t0-t7*/
  554. psubw mm0,mm7
  555. psllw mm1,2
  556. paddw mm7,mm7
  557. psllw mm6,2
  558. /*mm1=t6'=t1-t6*/
  559. psubw mm1,mm6
  560. psllw mm2,2
  561. paddw mm6,mm6
  562. psllw mm5,2
  563. /*mm2=t5'=t2-t5*/
  564. psubw mm2,mm5
  565. psllw mm3,2
  566. paddw mm5,mm5
  567. psllw mm4,2
  568. /*mm3=t4'=t3-t4*/
  569. psubw mm3,mm4
  570. paddw mm4,mm4
  571. /*mm7=t0'=t0+t7*/
  572. paddw mm7,mm0
  573. /*mm6=t1'=t1+t6*/
  574. paddw mm6,mm1
  575. /*mm5=t2'=t2+t5*/
  576. paddw mm5,mm2
  577. /*mm4=t3'=t3+t4*/
  578. paddw mm4,mm3
  579. OC_FDCT8x4(0x08,0x18,0x28,0x38,0x48,0x58,0x68,0x78)
  580. OC_TRANSPOSE8x4(0x08,0x18,0x28,0x38,0x48,0x58,0x68,0x78)
  581. /*Here the first 4x4 block of output from the last transpose is the second
  582. 4x4 block of input for the next transform.
  583. We have cleverly arranged that it already be in the appropriate place,
  584. so we only have to do half the stores and loads.*/
  585. movq mm0,[0x00+Y]
  586. movq [0x58+Y],mm1
  587. movq mm1,[0x10+Y]
  588. movq [0x68+Y],mm2
  589. movq mm2,[0x20+Y]
  590. movq [0x78+Y],mm3
  591. movq mm3,[0x30+Y]
  592. OC_FDCT_STAGE1_8x4
  593. OC_FDCT8x4(0x00,0x10,0x20,0x30,0x08,0x18,0x28,0x38)
  594. /*mm0={-2}x4*/
  595. pcmpeqw mm2,mm2
  596. paddw mm2,mm2
  597. /*Round and store the results (no transpose).*/
  598. movq mm7,[Y+0x10]
  599. psubw mm4,mm2
  600. psubw mm6,mm2
  601. psraw mm4,2
  602. psubw mm0,mm2
  603. movq [BUF+0x00],mm4
  604. movq mm4,[Y+0x30]
  605. psraw mm6,2
  606. psubw mm5,mm2
  607. movq [BUF+0x20],mm6
  608. psraw mm0,2
  609. psubw mm3,mm2
  610. movq [BUF+0x40],mm0
  611. psraw mm5,2
  612. psubw mm1,mm2
  613. movq [BUF+0x50],mm5
  614. psraw mm3,2
  615. psubw mm7,mm2
  616. movq [BUF+0x60],mm3
  617. psraw mm1,2
  618. psubw mm4,mm2
  619. movq [BUF+0x70],mm1
  620. psraw mm7,2
  621. movq [BUF+0x10],mm7
  622. psraw mm4,2
  623. movq [BUF+0x30],mm4
  624. /*Load the next block.*/
  625. movq mm0,[0x40+Y]
  626. movq mm7,[0x78+Y]
  627. movq mm1,[0x50+Y]
  628. movq mm6,[0x68+Y]
  629. movq mm2,[0x60+Y]
  630. movq mm5,[0x58+Y]
  631. movq mm3,[0x70+Y]
  632. movq mm4,[0x48+Y]
  633. OC_FDCT_STAGE1_8x4
  634. OC_FDCT8x4(0x40,0x50,0x60,0x70,0x48,0x58,0x68,0x78)
  635. /*mm0={-2}x4*/
  636. pcmpeqw mm2,mm2
  637. paddw mm2,mm2
  638. /*Round and store the results (no transpose).*/
  639. movq mm7,[Y+0x50]
  640. psubw mm4,mm2
  641. psubw mm6,mm2
  642. psraw mm4,2
  643. psubw mm0,mm2
  644. movq [BUF+0x08],mm4
  645. movq mm4,[Y+0x70]
  646. psraw mm6,2
  647. psubw mm5,mm2
  648. movq [BUF+0x28],mm6
  649. psraw mm0,2
  650. psubw mm3,mm2
  651. movq [BUF+0x48],mm0
  652. psraw mm5,2
  653. psubw mm1,mm2
  654. movq [BUF+0x58],mm5
  655. psraw mm3,2
  656. psubw mm7,mm2
  657. movq [BUF+0x68],mm3
  658. psraw mm1,2
  659. psubw mm4,mm2
  660. movq [BUF+0x78],mm1
  661. psraw mm7,2
  662. movq [BUF+0x18],mm7
  663. psraw mm4,2
  664. movq [BUF+0x38],mm4
  665. #define OC_ZZ_LOAD_ROW_LO(_row,_reg) \
  666. __asm movq _reg,[BUF+16*(_row)] \
  667. #define OC_ZZ_LOAD_ROW_HI(_row,_reg) \
  668. __asm movq _reg,[BUF+16*(_row)+8] \
  669. OC_TRANSPOSE_ZIG_ZAG_MMXEXT
  670. #undef OC_ZZ_LOAD_ROW_LO
  671. #undef OC_ZZ_LOAD_ROW_HI
  672. #undef X
  673. #undef Y
  674. #undef A
  675. #undef BUF
  676. }
  677. }
#endif