#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
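
/*
 * Out-of-line user copy entry points (their implementations live outside
 * this header, likely under arch/x86/lib/).  Unlike the
 * copy_user_generic_*() routines above, these are expected to validate
 * the user address range themselves before copying.
 */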
__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
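
/*
 * copy_from_user() layers a compile-time sanity check on top of
 * _copy_from_user(): when gcc can determine the size of the destination
 * object and the requested length exceeds it, the copy is skipped and,
 * with CONFIG_DEBUG_VM, a warning is emitted.
 */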
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}
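
/*
 * copy_to_user(): adds the might_fault() annotation and defers to the
 * out-of-line _copy_to_user().
 */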
static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return _copy_to_user(dst, src, size);
}
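
/*
 * __copy_from_user(): no access_ok() check.  Constant sizes of
 * 1/2/4/8 bytes are inlined with a single __get_user_asm(); the 10-
 * and 16-byte cases are split into two accesses.  Everything else
 * falls back to copy_user_generic().
 */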
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
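
/*
 * __copy_to_user(): mirror image of __copy_from_user(), built on
 * __put_user_asm().  In the split 10- and 16-byte cases the
 * asm("":::"memory") between the two stores is a pure compiler barrier:
 * the memory clobber stops gcc from caching or reordering memory
 * accesses across it.
 */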
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
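
/*
 * __copy_in_user(): user-to-user copy.  For constant sizes of 1/2/4/8
 * bytes the data is bounced through a register-sized temporary with a
 * __get_user_asm()/__put_user_asm() pair; the store is skipped if the
 * load already faulted.
 */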
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
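
/*
 * The _inatomic variants skip might_fault() and go straight to
 * copy_user_generic(), so they can be used where sleeping is not
 * allowed; a faulting access simply reports the number of bytes left
 * uncopied.
 */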
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
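
/*
 * The _nocache variants route through __copy_user_nocache(), which is
 * meant to avoid polluting the cache with the copied data (it is backed
 * by a non-temporal copy routine outside this header); zerorest selects
 * whether the tail of the destination is zeroed when a fault cuts the
 * copy short.
 */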
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */