xor_32.h
#ifndef _ASM_X86_XOR_32_H
#define _ASM_X86_XOR_32_H

/*
 * Optimized RAID-5 checksumming functions for MMX and SSE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */

#define LD(x, y)	" movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x, y)	" movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x, y)	" pxor 8*("#x")(%2), %%mm"#y" ;\n"
#define XO2(x, y)	" pxor 8*("#x")(%3), %%mm"#y" ;\n"
#define XO3(x, y)	" pxor 8*("#x")(%4), %%mm"#y" ;\n"
#define XO4(x, y)	" pxor 8*("#x")(%5), %%mm"#y" ;\n"
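
/*
 * For reference, with the operand numbering used in the functions below
 * (%1 = p1, %2 = p2, %3 = p3, ...), LD(0, 0) expands to
 * " movq 8*(0)(%1), %%mm0 ;\n", i.e. it loads the first quadword of p1
 * into %mm0, and XO1(0, 0) XORs the matching quadword of p2 into it.
 * Each BLOCK(i) built from these macros handles four 8-byte quadwords per
 * source, so the four BLOCKs per loop iteration cover the 128-byte "line"
 * implied by the bytes >> 7 calculation.
 */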

#include <asm/i387.h>

/*
 * p1[0 .. bytes) ^= p2[0 .. bytes); the unrolled MMX loop handles
 * 128 bytes (sixteen quadwords) per iteration.
 */
static void
xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 7;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	XO1(i, 0)		\
	ST(i, 0)		\
	XO1(i + 1, 1)		\
	ST(i + 1, 1)		\
	XO1(i + 2, 2)		\
	ST(i + 2, 2)		\
	XO1(i + 3, 3)		\
	ST(i + 3, 3)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $128, %1 ;\n"
	" addl $128, %2 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2)
	:
	: "memory");

	kernel_fpu_end();
}

/* Same structure with one more source: p1 ^= p2 ^ p3. */
static void
xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3)
{
	unsigned long lines = bytes >> 7;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	XO2(i, 0)		\
	ST(i, 0)		\
	XO2(i + 1, 1)		\
	ST(i + 1, 1)		\
	XO2(i + 2, 2)		\
	ST(i + 2, 2)		\
	XO2(i + 3, 3)		\
	ST(i + 3, 3)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $128, %1 ;\n"
	" addl $128, %2 ;\n"
	" addl $128, %3 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	:
	: "memory");

	kernel_fpu_end();
}

static void
xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 7;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	XO2(i, 0)		\
	XO2(i + 1, 1)		\
	XO2(i + 2, 2)		\
	XO2(i + 3, 3)		\
	XO3(i, 0)		\
	ST(i, 0)		\
	XO3(i + 1, 1)		\
	ST(i + 1, 1)		\
	XO3(i + 2, 2)		\
	ST(i + 2, 2)		\
	XO3(i + 3, 3)		\
	ST(i + 3, 3)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $128, %1 ;\n"
	" addl $128, %2 ;\n"
	" addl $128, %3 ;\n"
	" addl $128, %4 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
	:
	: "memory");

	kernel_fpu_end();
}

static void
xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	      unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 7;

	kernel_fpu_begin();

	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	XO2(i, 0)		\
	XO2(i + 1, 1)		\
	XO2(i + 2, 2)		\
	XO2(i + 3, 3)		\
	XO3(i, 0)		\
	XO3(i + 1, 1)		\
	XO3(i + 2, 2)		\
	XO3(i + 3, 3)		\
	XO4(i, 0)		\
	ST(i, 0)		\
	XO4(i + 1, 1)		\
	ST(i + 1, 1)		\
	XO4(i + 2, 2)		\
	ST(i + 2, 2)		\
	XO4(i + 3, 3)		\
	ST(i + 3, 3)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $128, %1 ;\n"
	" addl $128, %2 ;\n"
	" addl $128, %3 ;\n"
	" addl $128, %4 ;\n"
	" addl $128, %5 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	: "r" (p4), "r" (p5)
	: "memory");

	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));

	kernel_fpu_end();
}

#undef LD
#undef XO1
#undef XO2
#undef XO3
#undef XO4
#undef ST
#undef BLOCK

/*
 * The "p5" (Pentium/MMX) variants perform the same p1 ^= p2 [^ p3 ...]
 * operation, but 64 bytes per loop iteration and with the loads, pxors
 * and stores interleaved by hand rather than generated from BLOCK macros.
 */
static void
xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 6;

	kernel_fpu_begin();

	asm volatile(
	" .align 32 ;\n"
	" 1: ;\n"
	" movq (%1), %%mm0 ;\n"
	" movq 8(%1), %%mm1 ;\n"
	" pxor (%2), %%mm0 ;\n"
	" movq 16(%1), %%mm2 ;\n"
	" movq %%mm0, (%1) ;\n"
	" pxor 8(%2), %%mm1 ;\n"
	" movq 24(%1), %%mm3 ;\n"
	" movq %%mm1, 8(%1) ;\n"
	" pxor 16(%2), %%mm2 ;\n"
	" movq 32(%1), %%mm4 ;\n"
	" movq %%mm2, 16(%1) ;\n"
	" pxor 24(%2), %%mm3 ;\n"
	" movq 40(%1), %%mm5 ;\n"
	" movq %%mm3, 24(%1) ;\n"
	" pxor 32(%2), %%mm4 ;\n"
	" movq 48(%1), %%mm6 ;\n"
	" movq %%mm4, 32(%1) ;\n"
	" pxor 40(%2), %%mm5 ;\n"
	" movq 56(%1), %%mm7 ;\n"
	" movq %%mm5, 40(%1) ;\n"
	" pxor 48(%2), %%mm6 ;\n"
	" pxor 56(%2), %%mm7 ;\n"
	" movq %%mm6, 48(%1) ;\n"
	" movq %%mm7, 56(%1) ;\n"
	" addl $64, %1 ;\n"
	" addl $64, %2 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2)
	:
	: "memory");

	kernel_fpu_end();
}

static void
xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3)
{
	unsigned long lines = bytes >> 6;

	kernel_fpu_begin();

	asm volatile(
	" .align 32,0x90 ;\n"
	" 1: ;\n"
	" movq (%1), %%mm0 ;\n"
	" movq 8(%1), %%mm1 ;\n"
	" pxor (%2), %%mm0 ;\n"
	" movq 16(%1), %%mm2 ;\n"
	" pxor 8(%2), %%mm1 ;\n"
	" pxor (%3), %%mm0 ;\n"
	" pxor 16(%2), %%mm2 ;\n"
	" movq %%mm0, (%1) ;\n"
	" pxor 8(%3), %%mm1 ;\n"
	" pxor 16(%3), %%mm2 ;\n"
	" movq 24(%1), %%mm3 ;\n"
	" movq %%mm1, 8(%1) ;\n"
	" movq 32(%1), %%mm4 ;\n"
	" movq 40(%1), %%mm5 ;\n"
	" pxor 24(%2), %%mm3 ;\n"
	" movq %%mm2, 16(%1) ;\n"
	" pxor 32(%2), %%mm4 ;\n"
	" pxor 24(%3), %%mm3 ;\n"
	" pxor 40(%2), %%mm5 ;\n"
	" movq %%mm3, 24(%1) ;\n"
	" pxor 32(%3), %%mm4 ;\n"
	" pxor 40(%3), %%mm5 ;\n"
	" movq 48(%1), %%mm6 ;\n"
	" movq %%mm4, 32(%1) ;\n"
	" movq 56(%1), %%mm7 ;\n"
	" pxor 48(%2), %%mm6 ;\n"
	" movq %%mm5, 40(%1) ;\n"
	" pxor 56(%2), %%mm7 ;\n"
	" pxor 48(%3), %%mm6 ;\n"
	" pxor 56(%3), %%mm7 ;\n"
	" movq %%mm6, 48(%1) ;\n"
	" movq %%mm7, 56(%1) ;\n"
	" addl $64, %1 ;\n"
	" addl $64, %2 ;\n"
	" addl $64, %3 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	:
	: "memory");

	kernel_fpu_end();
}

static void
xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 6;

	kernel_fpu_begin();

	asm volatile(
	" .align 32,0x90 ;\n"
	" 1: ;\n"
	" movq (%1), %%mm0 ;\n"
	" movq 8(%1), %%mm1 ;\n"
	" pxor (%2), %%mm0 ;\n"
	" movq 16(%1), %%mm2 ;\n"
	" pxor 8(%2), %%mm1 ;\n"
	" pxor (%3), %%mm0 ;\n"
	" pxor 16(%2), %%mm2 ;\n"
	" pxor 8(%3), %%mm1 ;\n"
	" pxor (%4), %%mm0 ;\n"
	" movq 24(%1), %%mm3 ;\n"
	" pxor 16(%3), %%mm2 ;\n"
	" pxor 8(%4), %%mm1 ;\n"
	" movq %%mm0, (%1) ;\n"
	" movq 32(%1), %%mm4 ;\n"
	" pxor 24(%2), %%mm3 ;\n"
	" pxor 16(%4), %%mm2 ;\n"
	" movq %%mm1, 8(%1) ;\n"
	" movq 40(%1), %%mm5 ;\n"
	" pxor 32(%2), %%mm4 ;\n"
	" pxor 24(%3), %%mm3 ;\n"
	" movq %%mm2, 16(%1) ;\n"
	" pxor 40(%2), %%mm5 ;\n"
	" pxor 32(%3), %%mm4 ;\n"
	" pxor 24(%4), %%mm3 ;\n"
	" movq %%mm3, 24(%1) ;\n"
	" movq 56(%1), %%mm7 ;\n"
	" movq 48(%1), %%mm6 ;\n"
	" pxor 40(%3), %%mm5 ;\n"
	" pxor 32(%4), %%mm4 ;\n"
	" pxor 48(%2), %%mm6 ;\n"
	" movq %%mm4, 32(%1) ;\n"
	" pxor 56(%2), %%mm7 ;\n"
	" pxor 40(%4), %%mm5 ;\n"
	" pxor 48(%3), %%mm6 ;\n"
	" pxor 56(%3), %%mm7 ;\n"
	" movq %%mm5, 40(%1) ;\n"
	" pxor 48(%4), %%mm6 ;\n"
	" pxor 56(%4), %%mm7 ;\n"
	" movq %%mm6, 48(%1) ;\n"
	" movq %%mm7, 56(%1) ;\n"
	" addl $64, %1 ;\n"
	" addl $64, %2 ;\n"
	" addl $64, %3 ;\n"
	" addl $64, %4 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
	:
	: "memory");

	kernel_fpu_end();
}

static void
xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 6;

	kernel_fpu_begin();

	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));

	asm volatile(
	" .align 32,0x90 ;\n"
	" 1: ;\n"
	" movq (%1), %%mm0 ;\n"
	" movq 8(%1), %%mm1 ;\n"
	" pxor (%2), %%mm0 ;\n"
	" pxor 8(%2), %%mm1 ;\n"
	" movq 16(%1), %%mm2 ;\n"
	" pxor (%3), %%mm0 ;\n"
	" pxor 8(%3), %%mm1 ;\n"
	" pxor 16(%2), %%mm2 ;\n"
	" pxor (%4), %%mm0 ;\n"
	" pxor 8(%4), %%mm1 ;\n"
	" pxor 16(%3), %%mm2 ;\n"
	" movq 24(%1), %%mm3 ;\n"
	" pxor (%5), %%mm0 ;\n"
	" pxor 8(%5), %%mm1 ;\n"
	" movq %%mm0, (%1) ;\n"
	" pxor 16(%4), %%mm2 ;\n"
	" pxor 24(%2), %%mm3 ;\n"
	" movq %%mm1, 8(%1) ;\n"
	" pxor 16(%5), %%mm2 ;\n"
	" pxor 24(%3), %%mm3 ;\n"
	" movq 32(%1), %%mm4 ;\n"
	" movq %%mm2, 16(%1) ;\n"
	" pxor 24(%4), %%mm3 ;\n"
	" pxor 32(%2), %%mm4 ;\n"
	" movq 40(%1), %%mm5 ;\n"
	" pxor 24(%5), %%mm3 ;\n"
	" pxor 32(%3), %%mm4 ;\n"
	" pxor 40(%2), %%mm5 ;\n"
	" movq %%mm3, 24(%1) ;\n"
	" pxor 32(%4), %%mm4 ;\n"
	" pxor 40(%3), %%mm5 ;\n"
	" movq 48(%1), %%mm6 ;\n"
	" movq 56(%1), %%mm7 ;\n"
	" pxor 32(%5), %%mm4 ;\n"
	" pxor 40(%4), %%mm5 ;\n"
	" pxor 48(%2), %%mm6 ;\n"
	" pxor 56(%2), %%mm7 ;\n"
	" movq %%mm4, 32(%1) ;\n"
	" pxor 48(%3), %%mm6 ;\n"
	" pxor 56(%3), %%mm7 ;\n"
	" pxor 40(%5), %%mm5 ;\n"
	" pxor 48(%4), %%mm6 ;\n"
	" pxor 56(%4), %%mm7 ;\n"
	" movq %%mm5, 40(%1) ;\n"
	" pxor 48(%5), %%mm6 ;\n"
	" pxor 56(%5), %%mm7 ;\n"
	" movq %%mm6, 48(%1) ;\n"
	" movq %%mm7, 56(%1) ;\n"
	" addl $64, %1 ;\n"
	" addl $64, %2 ;\n"
	" addl $64, %3 ;\n"
	" addl $64, %4 ;\n"
	" addl $64, %5 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	: "r" (p4), "r" (p5)
	: "memory");

	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));

	kernel_fpu_end();
}

static struct xor_block_template xor_block_pII_mmx = {
	.name = "pII_mmx",
	.do_2 = xor_pII_mmx_2,
	.do_3 = xor_pII_mmx_3,
	.do_4 = xor_pII_mmx_4,
	.do_5 = xor_pII_mmx_5,
};

static struct xor_block_template xor_block_p5_mmx = {
	.name = "p5_mmx",
	.do_2 = xor_p5_mmx_2,
	.do_3 = xor_p5_mmx_3,
	.do_4 = xor_p5_mmx_4,
	.do_5 = xor_p5_mmx_5,
};

/*
 * Cache avoiding checksumming functions utilizing KNI instructions
 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
 */

#define XMMS_SAVE					\
do {							\
	preempt_disable();				\
	cr0 = read_cr0();				\
	clts();						\
	asm volatile(					\
		"movups %%xmm0,(%0) ;\n\t"		\
		"movups %%xmm1,0x10(%0) ;\n\t"		\
		"movups %%xmm2,0x20(%0) ;\n\t"		\
		"movups %%xmm3,0x30(%0) ;\n\t"		\
		:					\
		: "r" (xmm_save)			\
		: "memory");				\
} while (0)

#define XMMS_RESTORE					\
do {							\
	asm volatile(					\
		"sfence ;\n\t"				\
		"movups (%0),%%xmm0 ;\n\t"		\
		"movups 0x10(%0),%%xmm1 ;\n\t"		\
		"movups 0x20(%0),%%xmm2 ;\n\t"		\
		"movups 0x30(%0),%%xmm3 ;\n\t"		\
		:					\
		: "r" (xmm_save)			\
		: "memory");				\
	write_cr0(cr0);					\
	preempt_enable();				\
} while (0)
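
/*
 * Unlike the MMX routines above, the SSE routines below do not use
 * kernel_fpu_begin()/kernel_fpu_end().  XMMS_SAVE disables preemption,
 * clears CR0.TS with clts() and spills only %xmm0-%xmm3 into the caller's
 * xmm_save buffer; XMMS_RESTORE reloads them, writes CR0 back and
 * re-enables preemption -- presumably because saving just the four XMM
 * registers actually used here is cheaper than a full FPU state save,
 * while restoring CR0.TS keeps the lazy-FPU behaviour of the interrupted
 * task intact.
 */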

#define ALIGN16 __attribute__((aligned(16)))

#define OFFS(x)		"16*("#x")"
#define PF_OFFS(x)	"256+16*("#x")"
#define PF0(x)		" prefetchnta "PF_OFFS(x)"(%1) ;\n"
#define LD(x, y)	" movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
#define ST(x, y)	" movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
#define PF1(x)		" prefetchnta "PF_OFFS(x)"(%2) ;\n"
#define PF2(x)		" prefetchnta "PF_OFFS(x)"(%3) ;\n"
#define PF3(x)		" prefetchnta "PF_OFFS(x)"(%4) ;\n"
#define PF4(x)		" prefetchnta "PF_OFFS(x)"(%5) ;\n"
#define PF5(x)		" prefetchnta "PF_OFFS(x)"(%6) ;\n"
#define XO1(x, y)	" xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
#define XO2(x, y)	" xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
#define XO3(x, y)	" xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
#define XO4(x, y)	" xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
#define XO5(x, y)	" xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
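
/*
 * The SSE loops below process 256 bytes per iteration using 16-byte xorps
 * operations.  PF_OFFS(x) is 256 + 16*x, so the prefetchnta issued by
 * PF0()..PF5() runs one full loop iteration ahead of the load/xor stream,
 * and the non-temporal hint asks the CPU to keep the prefetched source
 * data out of as much of the cache hierarchy as possible -- the "cache
 * avoiding" behaviour mentioned above.
 */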

static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;
	int cr0;

	XMMS_SAVE;

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	PF1(i)			\
	PF1(i + 2)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	PF0(i + 4)		\
	PF0(i + 6)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	ST(i, 0)		\
	ST(i + 1, 1)		\
	ST(i + 2, 2)		\
	ST(i + 3, 3)

	PF0(0)
	PF0(2)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $256, %1 ;\n"
	" addl $256, %2 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2)
	:
	: "memory");

	XMMS_RESTORE;
}

static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;
	int cr0;

	XMMS_SAVE;

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	PF1(i)			\
	PF1(i + 2)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	PF2(i)			\
	PF2(i + 2)		\
	PF0(i + 4)		\
	PF0(i + 6)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	XO2(i, 0)		\
	XO2(i + 1, 1)		\
	XO2(i + 2, 2)		\
	XO2(i + 3, 3)		\
	ST(i, 0)		\
	ST(i + 1, 1)		\
	ST(i + 2, 2)		\
	ST(i + 3, 3)

	PF0(0)
	PF0(2)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $256, %1 ;\n"
	" addl $256, %2 ;\n"
	" addl $256, %3 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	:
	: "memory");

	XMMS_RESTORE;
}

static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;
	int cr0;

	XMMS_SAVE;

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	PF1(i)			\
	PF1(i + 2)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	PF2(i)			\
	PF2(i + 2)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	PF3(i)			\
	PF3(i + 2)		\
	PF0(i + 4)		\
	PF0(i + 6)		\
	XO2(i, 0)		\
	XO2(i + 1, 1)		\
	XO2(i + 2, 2)		\
	XO2(i + 3, 3)		\
	XO3(i, 0)		\
	XO3(i + 1, 1)		\
	XO3(i + 2, 2)		\
	XO3(i + 3, 3)		\
	ST(i, 0)		\
	ST(i + 1, 1)		\
	ST(i + 2, 2)		\
	ST(i + 3, 3)

	PF0(0)
	PF0(2)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $256, %1 ;\n"
	" addl $256, %2 ;\n"
	" addl $256, %3 ;\n"
	" addl $256, %4 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
	:
	: "memory");

	XMMS_RESTORE;
}

static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 8;
	char xmm_save[16*4] ALIGN16;
	int cr0;

	XMMS_SAVE;

	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));

	asm volatile(
#undef BLOCK
#define BLOCK(i)		\
	PF1(i)			\
	PF1(i + 2)		\
	LD(i, 0)		\
	LD(i + 1, 1)		\
	LD(i + 2, 2)		\
	LD(i + 3, 3)		\
	PF2(i)			\
	PF2(i + 2)		\
	XO1(i, 0)		\
	XO1(i + 1, 1)		\
	XO1(i + 2, 2)		\
	XO1(i + 3, 3)		\
	PF3(i)			\
	PF3(i + 2)		\
	XO2(i, 0)		\
	XO2(i + 1, 1)		\
	XO2(i + 2, 2)		\
	XO2(i + 3, 3)		\
	PF4(i)			\
	PF4(i + 2)		\
	PF0(i + 4)		\
	PF0(i + 6)		\
	XO3(i, 0)		\
	XO3(i + 1, 1)		\
	XO3(i + 2, 2)		\
	XO3(i + 3, 3)		\
	XO4(i, 0)		\
	XO4(i + 1, 1)		\
	XO4(i + 2, 2)		\
	XO4(i + 3, 3)		\
	ST(i, 0)		\
	ST(i + 1, 1)		\
	ST(i + 2, 2)		\
	ST(i + 3, 3)

	PF0(0)
	PF0(2)

	" .align 32 ;\n"
	" 1: ;\n"
	BLOCK(0)
	BLOCK(4)
	BLOCK(8)
	BLOCK(12)
	" addl $256, %1 ;\n"
	" addl $256, %2 ;\n"
	" addl $256, %3 ;\n"
	" addl $256, %4 ;\n"
	" addl $256, %5 ;\n"
	" decl %0 ;\n"
	" jnz 1b ;\n"
	: "+r" (lines),
	  "+r" (p1), "+r" (p2), "+r" (p3)
	: "r" (p4), "r" (p5)
	: "memory");

	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));

	XMMS_RESTORE;
}

static struct xor_block_template xor_block_pIII_sse = {
	.name = "pIII_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};

/* Also try the generic routines. */
#include <asm-generic/xor.h>

#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
do {							\
	xor_speed(&xor_block_8regs);			\
	xor_speed(&xor_block_8regs_p);			\
	xor_speed(&xor_block_32regs);			\
	xor_speed(&xor_block_32regs_p);			\
	if (cpu_has_xmm)				\
		xor_speed(&xor_block_pIII_sse);		\
	if (cpu_has_mmx) {				\
		xor_speed(&xor_block_pII_mmx);		\
		xor_speed(&xor_block_p5_mmx);		\
	}						\
} while (0)
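
/*
 * XOR_TRY_TEMPLATES is expanded by the generic xor benchmarking code
 * (see <asm-generic/xor.h> and crypto/xor.c), which uses xor_speed() to
 * time each candidate template and record the fastest one.
 * XOR_SELECT_TEMPLATE() below then gets a chance to override that choice;
 * this header uses it to force the SSE template whenever the CPU has XMM
 * support, for the reason given in the comment below.
 */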

/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched.  */
#define XOR_SELECT_TEMPLATE(FASTEST)			\
	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)

#endif /* _ASM_X86_XOR_32_H */