/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */
#if defined(__i386__) || defined(__x86_64__)

#include "raid6.h"
#include "raid6x86.h"

static const struct raid6_sse_constants {
        u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
        { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};
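
/*
 * 0x1d is the low byte of the RAID-6 generator polynomial
 * x^8 + x^4 + x^3 + x^2 + 1 (0x11d).  Broadcast across a 16-byte
 * register, it lets the loops below multiply 16 GF(2^8) bytes by {02}
 * at once.  For reference, a scalar sketch of that multiply-by-{02}
 * step (illustrative helper only, not used in this file):
 *
 *      static inline u8 gf_mul2(u8 v)
 *      {
 *              return (u8)(v << 1) ^ ((v & 0x80) ? 0x1d : 0);
 *      }
 */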
static int raid6_have_sse2(void)
{
#ifdef __KERNEL__
        /* Not really boot_cpu but "all_cpus" */
        return boot_cpu_has(X86_FEATURE_MMX) &&
                boot_cpu_has(X86_FEATURE_FXSR) &&
                boot_cpu_has(X86_FEATURE_XMM) &&
                boot_cpu_has(X86_FEATURE_XMM2);
#else
        /* User space test code */
        u32 features = cpuid_features();
        return ( (features & (15<<23)) == (15<<23) );
#endif
}
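
/*
 * The user-space check above tests bits 23-26 of the CPUID leaf 1 EDX
 * feature flags (MMX, FXSR, SSE and SSE2 respectively); 15<<23 is the
 * mask covering all four bits.
 */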
/*
 * Plain SSE2 implementation
 */
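/*
 * For each 16-byte chunk, P is the XOR of all data blocks and Q is
 * built by Horner's rule: starting from the highest data disk, Q is
 * multiplied by {02} in GF(2^8) and the next block is XORed in.  The
 * pcmpgtb/paddb/pand/pxor sequence is the vector multiply-by-{02}:
 * bytes whose top bit was set get reduced with the 0x1d constant after
 * the shift.  Results go out through non-temporal stores (movntdq), so
 * an sfence is issued before returning.
 */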
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
        u8 **dptr = (u8 **)ptrs;
        u8 *p, *q;
        int d, z, z0;
        raid6_sse_save_t sa;

        z0 = disks - 3;         /* Highest data disk */
        p = dptr[z0+1];         /* XOR parity */
        q = dptr[z0+2];         /* RS syndrome */

        raid6_before_sse2(&sa);

        asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
        asm volatile("pxor %xmm5,%xmm5");       /* Zero temp */

        for ( d = 0 ; d < bytes ; d += 16 ) {
                asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
                asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
                asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
                asm volatile("movdqa %xmm2,%xmm4");     /* Q[0] */
                asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
                for ( z = z0-2 ; z >= 0 ; z-- ) {
                        asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
                        asm volatile("pcmpgtb %xmm4,%xmm5");
                        asm volatile("paddb %xmm4,%xmm4");
                        asm volatile("pand %xmm0,%xmm5");
                        asm volatile("pxor %xmm5,%xmm4");
                        asm volatile("pxor %xmm5,%xmm5");
                        asm volatile("pxor %xmm6,%xmm2");
                        asm volatile("pxor %xmm6,%xmm4");
                        asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
                }
                asm volatile("pcmpgtb %xmm4,%xmm5");
                asm volatile("paddb %xmm4,%xmm4");
                asm volatile("pand %xmm0,%xmm5");
                asm volatile("pxor %xmm5,%xmm4");
                asm volatile("pxor %xmm5,%xmm5");
                asm volatile("pxor %xmm6,%xmm2");
                asm volatile("pxor %xmm6,%xmm4");

                asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
                asm volatile("pxor %xmm2,%xmm2");
                asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
                asm volatile("pxor %xmm4,%xmm4");
        }

        raid6_after_sse2(&sa);
        asm volatile("sfence" : : : "memory");
}
const struct raid6_calls raid6_sse2x1 = {
        raid6_sse21_gen_syndrome,
        raid6_have_sse2,
        "sse2x1",
        1                       /* Has cache hints */
};
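
/*
 * The initializers above follow struct raid6_calls as declared in
 * raid6.h: the syndrome generator, the availability check, the name
 * reported when an algorithm is selected, and a flag noting that the
 * routine uses cache hints (prefetch and non-temporal stores).
 */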
/*
 * Unrolled-by-2 SSE2 implementation
 */
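/*
 * Same algorithm as above, but two independent P/Q accumulator pairs
 * (xmm2/xmm4 and xmm3/xmm6) handle 32 bytes per pass, which gives the
 * CPU more independent work to overlap.
 */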
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
        u8 **dptr = (u8 **)ptrs;
        u8 *p, *q;
        int d, z, z0;
        raid6_sse_save_t sa;

        z0 = disks - 3;         /* Highest data disk */
        p = dptr[z0+1];         /* XOR parity */
        q = dptr[z0+2];         /* RS syndrome */

        raid6_before_sse2(&sa);

        asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
        asm volatile("pxor %xmm5,%xmm5");       /* Zero temp */
        asm volatile("pxor %xmm7,%xmm7");       /* Zero temp */

        /* We uniformly assume a single prefetch covers at least 32 bytes */
        for ( d = 0 ; d < bytes ; d += 32 ) {
                asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
                asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
                asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
                asm volatile("movdqa %xmm2,%xmm4");     /* Q[0] */
                asm volatile("movdqa %xmm3,%xmm6");     /* Q[1] */
                for ( z = z0-1 ; z >= 0 ; z-- ) {
                        asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
                        asm volatile("pcmpgtb %xmm4,%xmm5");
                        asm volatile("pcmpgtb %xmm6,%xmm7");
                        asm volatile("paddb %xmm4,%xmm4");
                        asm volatile("paddb %xmm6,%xmm6");
                        asm volatile("pand %xmm0,%xmm5");
                        asm volatile("pand %xmm0,%xmm7");
                        asm volatile("pxor %xmm5,%xmm4");
                        asm volatile("pxor %xmm7,%xmm6");
                        asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
                        asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
                        asm volatile("pxor %xmm5,%xmm2");
                        asm volatile("pxor %xmm7,%xmm3");
                        asm volatile("pxor %xmm5,%xmm4");
                        asm volatile("pxor %xmm7,%xmm6");
                        asm volatile("pxor %xmm5,%xmm5");
                        asm volatile("pxor %xmm7,%xmm7");
                }
                asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
                asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
                asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
                asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
        }

        raid6_after_sse2(&sa);
        asm volatile("sfence" : : : "memory");
}

const struct raid6_calls raid6_sse2x2 = {
        raid6_sse22_gen_syndrome,
        raid6_have_sse2,
        "sse2x2",
        1                       /* Has cache hints */
};
#endif
#ifdef __x86_64__

/*
 * Unrolled-by-4 SSE2 implementation
 */
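/*
 * Built only on x86-64, where the additional registers xmm8-xmm15 are
 * available.  All four P/Q accumulator pairs stay in registers across
 * the entire inner loop, and 64 bytes are processed per pass.
 */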
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
        u8 **dptr = (u8 **)ptrs;
        u8 *p, *q;
        int d, z, z0;
        raid6_sse16_save_t sa;

        z0 = disks - 3;         /* Highest data disk */
        p = dptr[z0+1];         /* XOR parity */
        q = dptr[z0+2];         /* RS syndrome */

        raid6_before_sse16(&sa);

        asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
        asm volatile("pxor %xmm2,%xmm2");       /* P[0] */
        asm volatile("pxor %xmm3,%xmm3");       /* P[1] */
        asm volatile("pxor %xmm4,%xmm4");       /* Q[0] */
        asm volatile("pxor %xmm5,%xmm5");       /* Zero temp */
        asm volatile("pxor %xmm6,%xmm6");       /* Q[1] */
        asm volatile("pxor %xmm7,%xmm7");       /* Zero temp */
        asm volatile("pxor %xmm10,%xmm10");     /* P[2] */
        asm volatile("pxor %xmm11,%xmm11");     /* P[3] */
        asm volatile("pxor %xmm12,%xmm12");     /* Q[2] */
        asm volatile("pxor %xmm13,%xmm13");     /* Zero temp */
        asm volatile("pxor %xmm14,%xmm14");     /* Q[3] */
        asm volatile("pxor %xmm15,%xmm15");     /* Zero temp */

        for ( d = 0 ; d < bytes ; d += 64 ) {
                for ( z = z0 ; z >= 0 ; z-- ) {
                        /* The second prefetch seems to improve performance... */
                        asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
                        asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
                        asm volatile("pcmpgtb %xmm4,%xmm5");
                        asm volatile("pcmpgtb %xmm6,%xmm7");
                        asm volatile("pcmpgtb %xmm12,%xmm13");
                        asm volatile("pcmpgtb %xmm14,%xmm15");
                        asm volatile("paddb %xmm4,%xmm4");
                        asm volatile("paddb %xmm6,%xmm6");
                        asm volatile("paddb %xmm12,%xmm12");
                        asm volatile("paddb %xmm14,%xmm14");
                        asm volatile("pand %xmm0,%xmm5");
                        asm volatile("pand %xmm0,%xmm7");
                        asm volatile("pand %xmm0,%xmm13");
                        asm volatile("pand %xmm0,%xmm15");
                        asm volatile("pxor %xmm5,%xmm4");
                        asm volatile("pxor %xmm7,%xmm6");
                        asm volatile("pxor %xmm13,%xmm12");
                        asm volatile("pxor %xmm15,%xmm14");
                        asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
                        asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
                        asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
                        asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
                        asm volatile("pxor %xmm5,%xmm2");
                        asm volatile("pxor %xmm7,%xmm3");
                        asm volatile("pxor %xmm13,%xmm10");
                        asm volatile("pxor %xmm15,%xmm11");
                        asm volatile("pxor %xmm5,%xmm4");
                        asm volatile("pxor %xmm7,%xmm6");
                        asm volatile("pxor %xmm13,%xmm12");
                        asm volatile("pxor %xmm15,%xmm14");
                        asm volatile("pxor %xmm5,%xmm5");
                        asm volatile("pxor %xmm7,%xmm7");
                        asm volatile("pxor %xmm13,%xmm13");
                        asm volatile("pxor %xmm15,%xmm15");
                }
                asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
                asm volatile("pxor %xmm2,%xmm2");
                asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
                asm volatile("pxor %xmm3,%xmm3");
                asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
                asm volatile("pxor %xmm10,%xmm10");
                asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
                asm volatile("pxor %xmm11,%xmm11");
                asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
                asm volatile("pxor %xmm4,%xmm4");
                asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
                asm volatile("pxor %xmm6,%xmm6");
                asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
                asm volatile("pxor %xmm12,%xmm12");
                asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
                asm volatile("pxor %xmm14,%xmm14");
        }

        asm volatile("sfence" : : : "memory");
        raid6_after_sse16(&sa);
}

const struct raid6_calls raid6_sse2x4 = {
        raid6_sse24_gen_syndrome,
        raid6_have_sse2,
        "sse2x4",
        1                       /* Has cache hints */
};
#endif