csum-copy_64.S

/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details. No warranty for anything given at all.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/errno.h>
#include <asm/asm.h>

/*
 * Checksum copy with exception handling.
 * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
 * destination is zeroed.
 *
 * Input
 * rdi	source
 * rsi	destination
 * edx	len (32bit)
 * ecx	sum (32bit)
 * r8	src_err_ptr (int)
 * r9	dst_err_ptr (int)
 *
 * Output
 * eax	64bit sum. undefined in case of exception.
 *
 * Wrappers need to take care of valid exception sum and zeroing.
 * They also should align source or destination to 8 bytes.
 */
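/*
 * In C terms, the register convention above corresponds roughly to
 * the following prototype (a sketch for orientation; the exact
 * declaration lives in the x86 checksum headers):
 *
 *	__wsum csum_partial_copy_generic(const void *src, void *dst,
 *					 int len, __wsum sum,
 *					 int *src_err_ptr, int *dst_err_ptr);
 */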
	.macro source
10:
	_ASM_EXTABLE(10b, .Lbad_source)
	.endm

	.macro dest
20:
	_ASM_EXTABLE(20b, .Lbad_dest)
	.endm

	.macro ignore L=.Lignore
30:
	_ASM_EXTABLE(30b, \L)
	.endm
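/*
 * Each "source"/"dest" invocation drops a numbered local label right
 * before the memory access that follows it and records that label in
 * the kernel exception table via _ASM_EXTABLE, so a fault on that
 * access lands in .Lbad_source or .Lbad_dest instead of oopsing.
 * "ignore" does the same with a caller-supplied landing label.
 */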
ENTRY(csum_partial_copy_generic)
	CFI_STARTPROC
	cmpl	$3*64, %edx
	jle	.Lignore	/* target is the very next label; leftover structure */

.Lignore:
	subq	$7*8, %rsp
	CFI_ADJUST_CFA_OFFSET 7*8
	movq	%rbx, 2*8(%rsp)
	CFI_REL_OFFSET rbx, 2*8
	movq	%r12, 3*8(%rsp)
	CFI_REL_OFFSET r12, 3*8
	movq	%r14, 4*8(%rsp)
	CFI_REL_OFFSET r14, 4*8
	movq	%r13, 5*8(%rsp)
	CFI_REL_OFFSET r13, 5*8
	movq	%rbp, 6*8(%rsp)
	CFI_REL_OFFSET rbp, 6*8

	movq	%r8, (%rsp)	/* save src_err_ptr */
	movq	%r9, 1*8(%rsp)	/* save dst_err_ptr */

	movl	%ecx, %eax	/* initial sum -> accumulator */
	movl	%edx, %ecx	/* len -> %ecx */

	xorl	%r9d, %r9d	/* %r9 stays zero: used as a carry sink */
	movq	%rcx, %r12

	shrq	$6, %r12	/* number of whole 64-byte blocks */
	jz	.Lhandle_tail	/* < 64 */

	clc

	/* main loop. copy in 64 byte blocks */
	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
	/* r11: temp3, rdx: temp4, r12 loopcnt */
	/* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
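	/*
	 * Sketch of one iteration in C-like pseudocode (illustration only,
	 * not part of the original file); adc64() stands for a 64-bit add
	 * that consumes and produces the x86 carry flag:
	 *
	 *	for (i = 0; i < 8; i++)
	 *		sum = adc64(sum, ((u64 *)src)[i]);
	 *	memcpy(dst, src, 64);
	 *	src += 64; dst += 64;
	 */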
	.p2align 4
.Lloop:
	source
	movq	(%rdi), %rbx
	source
	movq	8(%rdi), %r8
	source
	movq	16(%rdi), %r11
	source
	movq	24(%rdi), %rdx

	source
	movq	32(%rdi), %r10
	source
	movq	40(%rdi), %rbp
	source
	movq	48(%rdi), %r14
	source
	movq	56(%rdi), %r13

	ignore 2f
	prefetcht0 5*64(%rdi)
2:
	adcq	%rbx, %rax
	adcq	%r8, %rax
	adcq	%r11, %rax
	adcq	%rdx, %rax
	adcq	%r10, %rax
	adcq	%rbp, %rax
	adcq	%r14, %rax
	adcq	%r13, %rax

	decl	%r12d

	dest
	movq	%rbx, (%rsi)
	dest
	movq	%r8, 8(%rsi)
	dest
	movq	%r11, 16(%rsi)
	dest
	movq	%rdx, 24(%rsi)

	dest
	movq	%r10, 32(%rsi)
	dest
	movq	%rbp, 40(%rsi)
	dest
	movq	%r14, 48(%rsi)
	dest
	movq	%r13, 56(%rsi)

3:

	leaq	64(%rdi), %rdi
	leaq	64(%rsi), %rsi

	jnz	.Lloop

	adcq	%r9, %rax
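	/*
	 * The adcq chain above is what makes this a ones-complement sum:
	 * each 64-bit add leaves its carry-out in CF and the next adcq
	 * folds it back in. decl and leaq are used inside the loop because
	 * neither touches CF, so the carry from the last adcq of one
	 * iteration survives into the first adcq of the next; the final
	 * adcq of %r9 (always zero) absorbs the leftover carry.
	 */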
	/* do last up to 56 bytes */
.Lhandle_tail:
	/* ecx: count */
	movl	%ecx, %r10d
	andl	$63, %ecx
	shrl	$3, %ecx
	jz	.Lfold
	clc
	.p2align 4
.Lloop_8:
	source
	movq	(%rdi), %rbx
	adcq	%rbx, %rax
	decl	%ecx
	dest
	movq	%rbx, (%rsi)
	leaq	8(%rsi), %rsi	/* preserve carry */
	leaq	8(%rdi), %rdi
	jnz	.Lloop_8
	adcq	%r9, %rax	/* add in carry */
.Lfold:
	/* reduce checksum to 32bits */
	movl	%eax, %ebx
	shrq	$32, %rax
	addl	%ebx, %eax
	adcl	%r9d, %eax
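	/*
	 * In C terms the fold above is roughly (illustrative sketch, not
	 * part of the original file):
	 *
	 *	sum32 = (u32)sum64 + (u32)(sum64 >> 32) + carry_flag;
	 *
	 * Ones-complement arithmetic allows the 64-bit running sum to be
	 * reduced to 32 bits this way without losing information.
	 */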
	/* do last up to 6 bytes */
.Lhandle_7:
	movl	%r10d, %ecx
	andl	$7, %ecx
	shrl	$1, %ecx
	jz	.Lhandle_1
	movl	$2, %edx
	xorl	%ebx, %ebx
	clc
	.p2align 4
.Lloop_1:
	source
	movw	(%rdi), %bx
	adcl	%ebx, %eax
	decl	%ecx
	dest
	movw	%bx, (%rsi)
	leaq	2(%rdi), %rdi
	leaq	2(%rsi), %rsi
	jnz	.Lloop_1
	adcl	%r9d, %eax	/* add in carry */

	/* handle last odd byte */
.Lhandle_1:
	testl	$1, %r10d
	jz	.Lende
	xorl	%ebx, %ebx
	source
	movb	(%rdi), %bl
	dest
	movb	%bl, (%rsi)
	addl	%ebx, %eax
	adcl	%r9d, %eax	/* carry */
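	/*
	 * The Internet checksum treats a trailing odd byte as if it were
	 * padded with a zero byte. On little-endian x86, adding the byte
	 * through %bl puts it in the low half of a 16-bit word, which is
	 * exactly where movw would have loaded it from a (byte, 0x00)
	 * pair in memory.
	 */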
	CFI_REMEMBER_STATE
.Lende:
	movq	2*8(%rsp), %rbx
	CFI_RESTORE rbx
	movq	3*8(%rsp), %r12
	CFI_RESTORE r12
	movq	4*8(%rsp), %r14
	CFI_RESTORE r14
	movq	5*8(%rsp), %r13
	CFI_RESTORE r13
	movq	6*8(%rsp), %rbp
	CFI_RESTORE rbp
	addq	$7*8, %rsp
	CFI_ADJUST_CFA_OFFSET -7*8
	ret
	CFI_RESTORE_STATE

	/* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
	movq	(%rsp), %rax		/* saved src_err_ptr */
	testq	%rax, %rax
	jz	.Lende
	movl	$-EFAULT, (%rax)
	jmp	.Lende

.Lbad_dest:
	movq	8(%rsp), %rax		/* saved dst_err_ptr */
	testq	%rax, %rax
	jz	.Lende
	movl	$-EFAULT, (%rax)
	jmp	.Lende
	CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)