memmove_64.S

/*
 * Normally compiler builtins are used, but sometimes the compiler calls out
 * of line code. Based on asm-i386/string.h.
 *
 * This assembly file is rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#define _STRING_C
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeature.h>

#undef memmove

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
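
/*
 * Per the System V x86-64 calling convention, this entry point implements
 * the C prototype
 *
 *	void *memmove(void *dest, const void *src, size_t count);
 *
 * dest/src/count arrive in rdi/rsi/rdx, and dest is returned in rax,
 * which is why %rdi is copied into %rax right below.
 */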
ENTRY(memmove)
	CFI_STARTPROC

	/* Sizes of 32 bytes or more are handled by the copy loops below. */
	mov %rdi, %rax
	cmp $0x20, %rdx
	jb 1f
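	/*
	 * Counts below 32 bytes skip both bulk paths and go straight to
	 * the size dispatcher at 1:, which finishes with at most a
	 * handful of possibly overlapping loads and stores.
	 */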

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f
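	/*
	 * The selection logic in C-like pseudocode (an illustrative
	 * sketch, not kernel code):
	 *
	 *	if (src >= dst || src + count <= dst)
	 *		copy forward;	forward copy cannot clobber
	 *				unread source bytes
	 *	else
	 *		copy backward;	dst lands inside [src, src+count)
	 */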

.Lmemmove_begin_forward:
	/*
	 * The movsq instruction has a high startup latency, so we handle
	 * small sizes with general-purpose registers instead.
	 */
	cmp $680, %rdx
	jb 3f
	/*
	 * movsq is only a win when src and dst are mutually aligned.
	 */
	cmpb %dil, %sil
	je 4f
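	/*
	 * Comparing the low bytes of the two pointers is a conservative
	 * alignment test: equality means src and dst are congruent mod
	 * 256 and therefore share the identical 8-byte alignment that
	 * movsq wants. The 680-byte cutoff above is presumably the tuned
	 * crossover below which the register loop beats the movsq
	 * startup cost.
	 */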
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop.
	 */
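	/*
	 * Loop-control sketch (illustration only): %rdx is biased down
	 * by 32 once above, and the jae below tests the sub at the head
	 * of the loop (mov and lea leave the flags untouched), so the
	 * net effect is
	 *
	 *	while (count >= 32) { copy 32 bytes; count -= 32; }
	 *
	 * with the addq after the loop restoring the 0..31 byte remainder.
	 */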
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
	/*
	 * Handle data forward by movsq.
	 */
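	/*
	 * The count need not be a multiple of 8: the last 8 source bytes
	 * and the matching destination address are saved in %r11/%r10
	 * before rep movsq runs and stored afterwards, so the 1..7
	 * trailing bytes are covered by one final, possibly overlapping,
	 * 8-byte store. rep movsq itself copies count/8 quadwords.
	 */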
	.p2align 4
4:
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
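	/*
	 * Backward variant of the same tail trick: the first 8 source
	 * bytes and the original dest pointer are saved up front (the
	 * head of src may lie inside the dest region and be clobbered),
	 * rsi/rdi are pointed at the last quadword, and std makes rep
	 * movsq walk downward. The cld afterwards is required: kernel
	 * code must keep the direction flag clear.
	 */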
	.p2align 4
7:
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f

	/*
	 * Prepare for the backward copy (same size and alignment checks
	 * as on the forward path).
	 */
	.p2align 4
2:
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
6:
	/*
	 * Advance rsi and rdi past the end of the buffers so we can
	 * copy toward the head.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop.
	 */
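	/*
	 * Mirror image of the forward loop: %rdx is pre-biased by 32,
	 * jae tests the sub at the loop head, and the remainder and
	 * pointer positions are restored once the loop exits.
	 */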
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
	/*
	 * Step rsi and rdi back to the head of the remaining bytes.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Move 16 to 31 bytes of data.
	 */
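	/*
	 * Branchless head+tail trick: the first 16 and the last 16 bytes
	 * are all loaded before anything is stored, so the two chunks
	 * may overlap each other and the source may overlap the
	 * destination. As a sketch (load8/store8 are illustrative
	 * helpers, not real ones):
	 *
	 *	a = load8(src);          b = load8(src + 8);
	 *	c = load8(src + n - 16); d = load8(src + n - 8);
	 *	store8(dst, a);          store8(dst + 8, b);
	 *	store8(dst + n - 16, c); store8(dst + n - 8, d);
	 *
	 * The cases below (8..15, 4..7, and 2..3 bytes) repeat the same
	 * pattern with two narrower accesses.
	 */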
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Move 8 to 15 bytes of data.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Move 4 to 7 bytes of data.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Move 2 or 3 bytes of data.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Move a single byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
	CFI_ENDPROC
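
	/*
	 * Runtime patching via the alternatives mechanism: on CPUs with
	 * Enhanced REP MOVSB (X86_FEATURE_ERMS), apply_alternatives()
	 * overwrites the code between .Lmemmove_begin_forward and
	 * .Lmemmove_end_forward with the rep movsb sequence below. A
	 * forward byte-granular copy is safe on that path: either
	 * src >= dst, or the two regions do not overlap at all.
	 */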
	.section .altinstr_replacement,"ax"
.Lmemmove_begin_forward_efs:
	/* Move the data forward. */
	movq %rdx, %rcx
	rep movsb
	retq
.Lmemmove_end_forward_efs:
	.previous

	.section .altinstructions,"a"
	.align 8
	.quad .Lmemmove_begin_forward
	.quad .Lmemmove_begin_forward_efs
	.word X86_FEATURE_ERMS
	.byte .Lmemmove_end_forward-.Lmemmove_begin_forward
	.byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
	.previous
ENDPROC(memmove)