uaccess_64.h
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define ARCH_HAS_SEARCH_EXTABLE

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "a" (x)		\
		     : "ebx")

#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
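
/*
 * Usage sketch (illustrative, not part of the original interface):
 * put_user()/get_user() move one scalar between kernel and user space
 * and return 0 on success or -EFAULT on a faulting user pointer.
 * get_user() is the checked counterpart provided alongside this
 * header; the helper name below is hypothetical.
 */
static inline int __example_bump_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* fetch current value */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* store incremented value */
}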

#define __put_user_check(x, ptr, size)				\
({								\
	int __pu_err;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1:							\
		__put_user_x(1, __pu_err, x, __pu_addr);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_err, x, __pu_addr);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_err, x, __pu_addr);	\
		break;						\
	case 8:							\
		__put_user_x(8, __pu_err, x, __pu_addr);	\
		break;						\
	default:						\
		__put_user_bad();				\
	}							\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i" (errno), "0" (err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
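
/*
 * Usage sketch (illustrative): copy_from_user()/copy_to_user() move
 * arbitrary-length buffers and return the number of bytes that could
 * NOT be copied, so zero means full success.  The helper name below
 * is hypothetical.
 */
static inline long __example_fetch_request(void *kbuf,
					   const void __user *ubuf,
					   unsigned len)
{
	if (copy_from_user(kbuf, ubuf, len))	/* nonzero: partial copy */
		return -EFAULT;
	return 0;
}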

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		return ret;
	case 2:
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		return ret;
	case 4:
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		return ret;
	case 8:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
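
/*
 * Note (explanatory): when size is a compile-time constant of 1, 2, 4,
 * 8, 10 or 16 bytes, __copy_from_user() above inlines one or two mov
 * instructions instead of calling copy_user_generic().  The last
 * argument of each __get_user_asm() is the count of bytes reported as
 * not copied if that particular access faults.
 */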

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		return ret;
	case 2:
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 4:
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		return ret;
	case 8:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
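
/*
 * Note (explanatory): the empty asm("" : : : "memory") between the two
 * stores in the 10- and 16-byte cases appears to serve as a compiler
 * barrier, keeping the compiler from reordering or merging the second
 * store with the first so the early-return-on-fault sequencing is
 * preserved.
 */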

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
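
/*
 * Usage sketch (illustrative): strncpy_from_user() copies a
 * NUL-terminated string from user space, returning its length
 * (excluding the NUL) or a negative error; a return equal to the
 * count suggests truncation.  The helper name below is hypothetical.
 */
static inline long __example_get_name(char *kname, long ksize,
				      const char __user *uname)
{
	long len = strncpy_from_user(kname, uname, ksize);

	if (len < 0)			/* -EFAULT on a bad user pointer */
		return len;
	if (len == ksize)		/* name did not fit in kname */
		return -ENAMETOOLONG;
	return len;
}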

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
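
/*
 * Note (explanatory): the _nocache variants use non-temporal stores so
 * a large copy does not displace useful data from the CPU caches; the
 * zerorest argument asks __copy_user_nocache() to zero whatever tail
 * could not be copied.  Typical use is bulk data the kernel will not
 * read again soon, e.g. filling a page about to be written to disk.
 */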

#endif /* __X86_64_UACCESS_H */