copy_user_64.S

/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/*
 * By placing feature2 after feature1 in altinstructions section, we logically
 * implement:
 * If CPU has feature2, jmp to alt2 is used
 * else if CPU has feature1, jmp to alt1 is used
 * else jmp to orig is used.
 */
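
/*
 * As a rough illustration only (C-like pseudocode, not assembled;
 * cpu_has, orig, alt1 and alt2 stand in for the macro arguments), the
 * jump that ends up being taken is:
 *
 *      if (cpu_has(feature2))
 *              goto alt2;
 *      else if (cpu_has(feature1))
 *              goto alt1;
 *      else
 *              goto orig;
 *
 * The 5-byte jmp emitted below is rewritten in place by the boot-time
 * alternatives patching when the corresponding CPU feature is present.
 */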
        .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
0:
        .byte 0xe9                      /* 32bit jump */
        .long \orig-1f                  /* by default jump to orig */
1:
        .section .altinstr_replacement,"ax"
2:      .byte 0xe9                      /* near jump with 32bit immediate */
        .long \alt1-1b /* offset */     /* or alternatively to alt1 */
3:      .byte 0xe9                      /* near jump with 32bit immediate */
        .long \alt2-1b /* offset */     /* or alternatively to alt2 */
        .previous

        .section .altinstructions,"a"
        altinstruction_entry 0b,2b,\feature1,5,5
        altinstruction_entry 0b,3b,\feature2,5,5
        .previous
        .endm
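
/*
 * ALIGN_DESTINATION copies single bytes until the destination in %rdi
 * is 8-byte aligned, adjusting the remaining count in %rdx as it goes;
 * a faulting byte access lands in the 103 fixup, which recomputes the
 * bytes left and jumps to copy_user_handle_tail. As a rough C-style
 * sketch (illustration only, placeholder names, not part of the build):
 *
 *      unsigned long misalign = (8 - ((unsigned long)dst & 7)) & 7;
 *      len -= misalign;
 *      while (misalign--)
 *              *dst++ = *src++;
 */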
        .macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
        /* check for bad alignment of destination */
        movl %edi,%ecx
        andl $7,%ecx
        jz 102f                         /* already aligned */
        subl $8,%ecx
        negl %ecx
        subl %ecx,%edx
100:    movb (%rsi),%al
101:    movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 100b
102:
        .section .fixup,"ax"
103:    addl %ecx,%edx                  /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(100b,103b)
        _ASM_EXTABLE(101b,103b)
#endif
        .endm

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
        CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rdi,%rcx
        addq %rdx,%rcx
        jc bad_to_user
        cmpq TI_addr_limit(%rax),%rcx
        ja bad_to_user
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,        \
                copy_user_generic_unrolled,copy_user_generic_string,   \
                copy_user_enhanced_fast_string
        CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
        CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rsi,%rcx
        addq %rdx,%rcx
        jc bad_from_user
        cmpq TI_addr_limit(%rax),%rcx
        ja bad_from_user
        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,        \
                copy_user_generic_unrolled,copy_user_generic_string,   \
                copy_user_enhanced_fast_string
        CFI_ENDPROC
ENDPROC(_copy_from_user)
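
/*
 * Note on the limit check above: it is the usual access_ok()-style
 * range test, rejecting the copy if the user pointer plus the length
 * wraps around or exceeds the task's addr_limit. As a rough C
 * equivalent (illustration only, placeholder names; the real check is
 * the four instructions above):
 *
 *      unsigned long end = ptr + len;
 *      if (end < ptr || end > current_thread_info()->addr_limit.seg)
 *              goto bad;
 */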

        .section .fixup,"ax"
        /* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
        CFI_STARTPROC
        movl %edx,%ecx
        xorl %eax,%eax
        rep
        stosb
bad_to_user:
        movl %edx,%eax
        ret
        CFI_ENDPROC
ENDPROC(bad_from_user)
        .previous
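
/*
 * Error handling overview: every numbered label in the copy loops
 * below has an _ASM_EXTABLE(from,to) entry, so a fault at instruction
 * 'from' resumes at fixup label 'to'. The fixup code recomputes how
 * many bytes are still outstanding and jumps to copy_user_handle_tail
 * (a C helper, see usercopy_64.c), which retries the remainder one
 * byte at a time and returns the count that could not be copied.
 * Roughly, as an illustrative and simplified sketch (not the actual
 * source):
 *
 *      unsigned long copy_user_handle_tail(char *to, char *from,
 *                                          unsigned long len)
 *      {
 *              while (len) {
 *                      char c;
 *                      if (__get_user(c, from++) || __put_user(c, to++))
 *                              break;
 *                      len--;
 *              }
 *              return len;
 *      }
 */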

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
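/*
 * Shape of the copy below, as an illustrative C-style sketch
 * (copy_64_bytes/copy_8_bytes/dst/src/len are placeholder names, not
 * part of the build): after aligning the destination, the bulk moves
 * in 64-byte chunks through %r8-%r11, the remainder in 8-byte words,
 * and the final tail byte by byte.
 *
 *      while (len >= 64) { copy_64_bytes(dst, src); dst += 64; src += 64; len -= 64; }
 *      while (len >= 8)  { copy_8_bytes(dst, src);  dst += 8;  src += 8;  len -= 8;  }
 *      while (len)       { *dst++ = *src++; len--; }
 */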
ENTRY(copy_user_generic_unrolled)
        CFI_STARTPROC
        ASM_STAC
        cmpl $8,%edx
        jb 20f                          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz 17f
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movq %r8,(%rdi)
6:      movq %r9,1*8(%rdi)
7:      movq %r10,2*8(%rdi)
8:      movq %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movq %r8,4*8(%rdi)
14:     movq %r9,5*8(%rdi)
15:     movq %r10,6*8(%rdi)
16:     movq %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
17:     movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz 20f
18:     movq (%rsi),%r8
19:     movq %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xor %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
        /* fault in the 64-byte loop: %ecx holds 64-byte chunks left */
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
        /* fault in the 8-byte loop: %ecx holds quadwords left */
40:     lea (%rdx,%rcx,8),%rdx
        jmp 60f
        /* fault in the byte loop: %ecx holds bytes left */
50:     movl %ecx,%edx
60:     jmp copy_user_handle_tail /* ecx is zerorest also */
        .previous

        _ASM_EXTABLE(1b,30b)
        _ASM_EXTABLE(2b,30b)
        _ASM_EXTABLE(3b,30b)
        _ASM_EXTABLE(4b,30b)
        _ASM_EXTABLE(5b,30b)
        _ASM_EXTABLE(6b,30b)
        _ASM_EXTABLE(7b,30b)
        _ASM_EXTABLE(8b,30b)
        _ASM_EXTABLE(9b,30b)
        _ASM_EXTABLE(10b,30b)
        _ASM_EXTABLE(11b,30b)
        _ASM_EXTABLE(12b,30b)
        _ASM_EXTABLE(13b,30b)
        _ASM_EXTABLE(14b,30b)
        _ASM_EXTABLE(15b,30b)
        _ASM_EXTABLE(16b,30b)
        _ASM_EXTABLE(18b,40b)
        _ASM_EXTABLE(19b,40b)
        _ASM_EXTABLE(21b,50b)
        _ASM_EXTABLE(22b,50b)
        CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix this,
 * please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
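/*
 * Illustrative sketch of the string variant below (C-like, placeholder
 * names rep_movsq/rep_movsb, not assembled): align the destination,
 * move whole quadwords with rep movsq, then the remaining 0-7 bytes
 * with rep movsb.
 *
 *      rep_movsq(dst, src, len / 8);
 *      rep_movsb(dst, src, len & 7);
 *      return 0;
 */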
ENTRY(copy_user_generic_string)
        CFI_STARTPROC
        ASM_STAC
        andl %edx,%edx
        jz 4f
        cmpl $8,%edx
        jb 2f                           /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        shrl $3,%ecx
        andl $7,%edx
1:      rep
        movsq
2:      movl %edx,%ecx
3:      rep
        movsb
4:      xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
11:     lea (%rdx,%rcx,8),%rcx
12:     movl %ecx,%edx                  /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,11b)
        _ASM_EXTABLE(3b,12b)
        CFI_ENDPROC
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs provide enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when it is enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
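/*
 * With ERMS a single rep movsb handles the whole copy, so the
 * illustrative sketch (placeholder names, not assembled) is simply:
 *
 *      if (len)
 *              rep_movsb(dst, src, len);
 *      return 0;       // eax = bytes not copied
 */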
ENTRY(copy_user_enhanced_fast_string)
        CFI_STARTPROC
        ASM_STAC
        andl %edx,%edx
        jz 2f
        movl %edx,%ecx
1:      rep
        movsb
2:      xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
12:     movl %ecx,%edx                  /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,12b)
        CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)