/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */
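
/*
 * For data blocks D_0 .. D_z0, the two syndromes generated below are
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_z0
 *	Q = 2^0*D_0 ^ 2^1*D_1 ^ ... ^ 2^z0*D_z0
 *
 * where multiplication is in GF(2^8) modulo x^8 + x^4 + x^3 + x^2 + 1;
 * see H. Peter Anvin, "The mathematics of RAID-6".
 */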
#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)

#include <linux/raid/pq.h>
#include "x86.h"

static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};
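
/*
 * 0x1d is the GF(2^8) reduction polynomial x^8 + x^4 + x^3 + x^2 + 1
 * (0x11d) with the x^8 term dropped, replicated across every byte lane.
 * As a minimal scalar sketch, the multiply-by-2 step the SSE2 loops
 * below vectorize is equivalent to (illustrative only; gf_mul2() is
 * not part of this file or the kernel API):
 *
 *	static inline u8 gf_mul2(u8 v)
 *	{
 *		return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
 *	}
 */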

static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}

/*
 * Plain SSE2 implementation
 */
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
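		/*
		 * Each pass multiplies the running Q by 2 in GF(2^8) and
		 * folds in the next data block: pcmpgtb against the zeroed
		 * %xmm5 sets 0xff in every byte whose top bit is set (the
		 * compare is signed), paddb doubles each byte, pand keeps
		 * the 0x1d reduction only in the overflowed lanes, and
		 * pxor applies it.
		 */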
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm6,%xmm2");
			asm volatile("pxor %xmm6,%xmm4");
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");

		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};
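
/*
 * Each of these raid6_calls tables is registered in raid6_algos[]
 * (lib/raid6/algos.c); a boot-time benchmark selects the fastest
 * routine whose validity check passes.
 */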

/*
 * Unrolled-by-2 SSE2 implementation
 */
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
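		/*
		 * Two 16-byte lanes per iteration; the multiply-by-2 and
		 * XOR steps of the two lanes are interleaved to hide
		 * instruction latency.  %xmm5/%xmm7 double as compare
		 * masks and data-load scratch, so they are re-zeroed at
		 * the bottom of the loop.
		 */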
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};

#endif

#if defined(__x86_64__) && !defined(__arch_um__)

/*
 * Unrolled-by-4 SSE2 implementation
 */
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4");	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6");	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12");	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14");	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15");	/* Zero temp */
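
	/*
	 * x86-64 only: with %xmm8-%xmm15 available, four 16-byte lanes
	 * stay resident at once.  P and Q start out zeroed, so every
	 * data disk, including the highest, is folded in by the inner
	 * loop rather than preloaded.
	 */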
	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif