uaccess_64.h

#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
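
/*
 * copy_user_generic() - raw copy, implementation selected at boot.
 *
 * The alternatives mechanism patches in a call to either the unrolled
 * copy loop or the "rep movs" based string copy, depending on whether
 * the CPU advertises X86_FEATURE_REP_GOOD.  The constraints pin
 * to/from/len into the registers the out-of-line helpers expect and
 * list everything they may clobber.  Returns the number of bytes that
 * could not be copied.
 */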
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	alternative_call(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
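
/*
 * copy_from_user() - checked copy from user space into a kernel buffer.
 *
 * When the size of the destination object is known at compile time it
 * is compared against 'n' to catch obvious overflows before handing off
 * to _copy_from_user().  Returns the number of bytes that could not be
 * copied, so the usual calling pattern is (sketch; 'args' and 'user_ptr'
 * are hypothetical):
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 */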
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);
	int ret = -EFAULT;

	might_fault();
	if (likely(sz == -1 || sz >= n))
		ret = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return ret;
}
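
/*
 * copy_to_user() - checked copy from a kernel buffer to user space.
 * Annotates the possible fault and defers to _copy_to_user(); returns
 * the number of bytes that could not be copied.
 */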
static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();

	return _copy_to_user(dst, src, size);
}
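
/*
 * __copy_from_user() - unchecked copy from user space.
 *
 * No access_ok() check is performed; callers must have validated the
 * range themselves.  For compile-time constant sizes of 1, 2, 4 and 8
 * bytes a single __get_user_asm() access is emitted, 10 and 16 byte
 * objects are split into two accesses, and everything else falls back
 * to copy_user_generic().
 */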
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
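
/*
 * __copy_to_user() - unchecked copy to user space, mirror image of
 * __copy_from_user().  The same constant-size special cases apply; the
 * empty asm("":::"memory") between the two halves of the 10 and 16 byte
 * cases acts as a compiler barrier between the two stores.
 */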
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
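
/*
 * __copy_in_user() - unchecked copy between two user space buffers.
 * Small constant sizes are bounced through a kernel temporary with a
 * __get_user_asm()/__put_user_asm() pair; anything else goes through
 * copy_user_generic().
 */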
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
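
/*
 * The *_inatomic() variants skip might_fault() and go straight to
 * copy_user_generic(), so they can be called with page faults disabled;
 * the caller then has to cope with a non-zero "bytes left" return.
 */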
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
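
/*
 * Non-temporal copies: __copy_user_nocache() moves data with
 * cache-bypassing stores where possible.  The 'zerorest' argument
 * selects whether the remaining destination bytes are zeroed when a
 * fault cuts the copy short; the sleeping wrapper below requests this,
 * the inatomic one does not.
 */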
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
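
/*
 * copy_user_handle_tail() is the fix-up helper the assembly copy
 * routines fall back to after a fault: it finishes the copy byte by
 * byte (zeroing the remainder when asked to) and returns the number of
 * bytes that were left uncopied.
 */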
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */