uaccess_64.h

#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */
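
/*
 * The three routines below are the out-of-line assembly copy loops (in
 * this era of the tree, arch/x86/lib/copy_user_64.S).  Like the rest of
 * the helpers in this header, they return the number of bytes that could
 * NOT be copied; 0 means the whole range was transferred.
 */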
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
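
/*
 * copy_user_generic() picks one of the routines above.  The selection is
 * not a runtime branch: alternative_call_2() emits a call to the unrolled
 * version and lets boot-time alternatives patching rewrite the call target
 * once the CPU feature flags (ERMS, REP_GOOD) are known.  The constraints
 * mirror the calling convention of the assembly routines: arguments in
 * %rdi/%rsi/%rdx, bytes-not-copied returned in %rax.
 */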
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
         * Otherwise, use copy_user_generic_unrolled.
         */
        alternative_call_2(copy_user_generic_unrolled,
                         copy_user_generic_string,
                         X86_FEATURE_REP_GOOD,
                         copy_user_enhanced_fast_string,
                         X86_FEATURE_ERMS,
                         ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                     "=d" (len)),
                         "1" (to), "2" (from), "3" (len)
                         : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}
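
/*
 * copy_in_user() copies from one user-space buffer to another; both
 * pointers refer to user memory.  Its unchecked inline counterpart,
 * __copy_in_user(), is further down in this file.
 */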
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
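
/*
 * __copy_from_user_nocheck() is the common inline bottom half of the
 * copy-from-user family: no access_ok(), no might_fault() annotation.
 * Small constant sizes (1, 2, 4, 8, 10 and 16 bytes) are inlined as one
 * or two __get_user_asm() moves; everything else goes through
 * copy_user_generic().
 */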
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
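
/*
 * __copy_from_user() adds the might_fault() annotation: the copy may
 * sleep on a page fault, so it must not be used in atomic context.  It
 * still assumes the caller has validated the user range.  A purely
 * illustrative caller sketch (karg/usrc are hypothetical names, and
 * access_ok() is shown with the VERIFY_READ argument this kernel
 * generation used):
 *
 *      if (!access_ok(VERIFY_READ, usrc, sizeof(karg)) ||
 *          __copy_from_user(&karg, usrc, sizeof(karg)))
 *              return -EFAULT;
 */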
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        might_fault();
        return __copy_from_user_nocheck(dst, src, size);
}
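
/*
 * __copy_to_user_nocheck() mirrors __copy_from_user_nocheck() in the
 * store direction.  Note the empty asm("" ::: "memory") in the 10- and
 * 16-byte cases: it is a compiler-level memory barrier separating the
 * two halves of the copy.
 */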
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}
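
/*
 * __copy_to_user(): as above, plus the might_fault() annotation.  The
 * caller is expected to have checked the destination range; the
 * access_ok()-checked copy_to_user() wrapper is defined outside this
 * header.
 */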
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        might_fault();
        return __copy_to_user_nocheck(dst, src, size);
}
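
/*
 * __copy_in_user() copies between two user buffers.  Constant sizes of
 * 1, 2, 4 or 8 bytes are bounced through a kernel temporary with a
 * __get_user_asm()/__put_user_asm() pair; anything else goes through
 * copy_user_generic() with both pointers force-cast to kernel pointers.
 */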
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}
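
/*
 * The _inatomic variants skip the might_fault() annotation, so they may
 * be used where page faults are disabled (e.g. under pagefault_disable()).
 * A fault then simply shows up as a short copy: the non-zero return value
 * tells the caller how many bytes were left.
 */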
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
        return __copy_from_user_nocheck(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return __copy_to_user_nocheck((__force void *)dst, src, size);
}
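
/*
 * The nocache variants copy with non-temporal stores, bypassing the CPU
 * caches; that pays off for large copies whose destination will not be
 * read again soon.  The zerorest argument controls whether the remainder
 * of the destination buffer is zeroed when a fault cuts the copy short.
 */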
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_fault();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}
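
/*
 * copy_user_handle_tail() is the fixup path shared by the assembly copy
 * routines: when one of them faults part-way through, it falls back to a
 * byte-at-a-time copy of whatever remains and returns the count that
 * could not be copied (zeroing the rest when zerorest is set).
 */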
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */