xor_avx.h

#ifndef _ASM_X86_XOR_AVX_H
#define _ASM_X86_XOR_AVX_H

/*
 * Optimized RAID-5 checksumming functions for AVX
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#ifdef CONFIG_AS_AVX

#include <linux/compiler.h>
#include <asm/i387.h>

#define ALIGN32 __aligned(32)

#define YMM_SAVED_REGS 4

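/*
 * Save ymm0-ymm3 into the caller's 32-byte-aligned ymm_save[] buffer and
 * claim the FPU for kernel use: preemption is disabled and CR0.TS is
 * cleared with clts() so the AVX instructions below do not fault.
 */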
#define YMMS_SAVE \
do { \
        preempt_disable(); \
        cr0 = read_cr0(); \
        clts(); \
        asm volatile("vmovaps %%ymm0, %0" : "=m" (ymm_save[0]) : : "memory"); \
        asm volatile("vmovaps %%ymm1, %0" : "=m" (ymm_save[32]) : : "memory"); \
        asm volatile("vmovaps %%ymm2, %0" : "=m" (ymm_save[64]) : : "memory"); \
        asm volatile("vmovaps %%ymm3, %0" : "=m" (ymm_save[96]) : : "memory"); \
} while (0);

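/*
 * Restore ymm0-ymm3 from ymm_save[], put CR0 (and with it the TS bit) back
 * to its saved value and re-enable preemption.
 */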
#define YMMS_RESTORE \
do { \
        asm volatile("sfence" : : : "memory"); \
        asm volatile("vmovaps %0, %%ymm3" : : "m" (ymm_save[96])); \
        asm volatile("vmovaps %0, %%ymm2" : : "m" (ymm_save[64])); \
        asm volatile("vmovaps %0, %%ymm1" : : "m" (ymm_save[32])); \
        asm volatile("vmovaps %0, %%ymm0" : : "m" (ymm_save[0])); \
        write_cr0(cr0); \
        preempt_enable(); \
} while (0);

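/*
 * BLOCK(offset, reg) is redefined inside each xor_avx_N() below to XOR one
 * 32-byte chunk through one ymm register.  BLOCK16() therefore covers
 * 16 * 32 = 512 bytes per loop iteration, which is why the functions use
 * lines = bytes >> 9.
 */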
#define BLOCK4(i) \
                BLOCK(32 * i, 0) \
                BLOCK(32 * (i + 1), 1) \
                BLOCK(32 * (i + 2), 2) \
                BLOCK(32 * (i + 3), 3)

#define BLOCK16() \
                BLOCK4(0) \
                BLOCK4(4) \
                BLOCK4(8) \
                BLOCK4(12)

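/* p0[i] ^= p1[i], processed 512 bytes per loop iteration. */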
static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
{
        unsigned long cr0, lines = bytes >> 9;
        char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;

        YMMS_SAVE

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
        }

        YMMS_RESTORE
}

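/* p0[i] ^= p1[i] ^ p2[i] */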
static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2)
{
        unsigned long cr0, lines = bytes >> 9;
        char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;

        YMMS_SAVE

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
        }

        YMMS_RESTORE
}

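/* p0[i] ^= p1[i] ^ p2[i] ^ p3[i] */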
static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2, unsigned long *p3)
{
        unsigned long cr0, lines = bytes >> 9;
        char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;

        YMMS_SAVE

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16();

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
                p3 = (unsigned long *)((uintptr_t)p3 + 512);
        }

        YMMS_RESTORE
}

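/* p0[i] ^= p1[i] ^ p2[i] ^ p3[i] ^ p4[i] */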
static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
        unsigned long *p2, unsigned long *p3, unsigned long *p4)
{
        unsigned long cr0, lines = bytes >> 9;
        char ymm_save[32 * YMM_SAVED_REGS] ALIGN32;

        YMMS_SAVE

        while (lines--) {
#undef BLOCK
#define BLOCK(i, reg) \
do { \
        asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p3[i / sizeof(*p3)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p2[i / sizeof(*p2)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p1[i / sizeof(*p1)])); \
        asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
                "m" (p0[i / sizeof(*p0)])); \
        asm volatile("vmovdqa %%ymm" #reg ", %0" : \
                "=m" (p0[i / sizeof(*p0)])); \
} while (0);

                BLOCK16()

                p0 = (unsigned long *)((uintptr_t)p0 + 512);
                p1 = (unsigned long *)((uintptr_t)p1 + 512);
                p2 = (unsigned long *)((uintptr_t)p2 + 512);
                p3 = (unsigned long *)((uintptr_t)p3 + 512);
                p4 = (unsigned long *)((uintptr_t)p4 + 512);
        }

        YMMS_RESTORE
}

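/*
 * Template handed to the generic xor framework: xor_speed() benchmarks it
 * against the other implementations and the fastest one is used for RAID-5
 * parity calculation.
 */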
static struct xor_block_template xor_block_avx = {
        .name = "avx",
        .do_2 = xor_avx_2,
        .do_3 = xor_avx_3,
        .do_4 = xor_avx_4,
        .do_5 = xor_avx_5,
};

#define AVX_XOR_SPEED \
do { \
        if (cpu_has_avx) \
                xor_speed(&xor_block_avx); \
} while (0)

#define AVX_SELECT(FASTEST) \
        (cpu_has_avx ? &xor_block_avx : FASTEST)

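/*
 * Rough usage sketch (not part of this header): the arch <asm/xor.h> headers
 * are expected to wire these macros into the generic calibration hooks along
 * these lines -- the exact template list varies by kernel version:
 *
 *   #define XOR_TRY_TEMPLATES              \
 *   do {                                   \
 *           AVX_XOR_SPEED;                 \
 *           xor_speed(&xor_block_sse);     \
 *   } while (0)
 *
 *   #define XOR_SELECT_TEMPLATE(FASTEST)   AVX_SELECT(FASTEST)
 */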
#else

#define AVX_XOR_SPEED {}

#define AVX_SELECT(FASTEST) (FASTEST)

#endif
#endif