uaccess_64.h

#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define __addr_ok(addr) (!((unsigned long)(addr) & \
                           (current_thread_info()->addr_limit.seg)))

#define ARCH_HAS_SEARCH_EXTABLE
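
/*
 * get_user() below expands to __get_user_x() and the out-of-line
 * __get_user_1/2/4/8 helpers, which are declared elsewhere and not part
 * of this file.  A minimal sketch of what they look like is given here
 * for reference only; the exact constraints and clobbers are an
 * assumption, not taken from this header:
 *
 *	extern void __get_user_1(void);
 *	extern void __get_user_2(void);
 *	extern void __get_user_4(void);
 *	extern void __get_user_8(void);
 *	extern void __get_user_bad(void);
 *
 *	#define __get_user_x(size, ret, x, ptr)           \
 *		asm volatile("call __get_user_" #size     \
 *			     : "=a" (ret), "=d" (x)       \
 *			     : "c" (ptr))
 */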
/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */
#define get_user(x, ptr)                                        \
({                                                              \
        unsigned long __val_gu;                                 \
        int __ret_gu;                                           \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __get_user_x(1, __ret_gu, __val_gu, ptr);       \
                break;                                          \
        case 2:                                                 \
                __get_user_x(2, __ret_gu, __val_gu, ptr);       \
                break;                                          \
        case 4:                                                 \
                __get_user_x(4, __ret_gu, __val_gu, ptr);       \
                break;                                          \
        case 8:                                                 \
                __get_user_x(8, __ret_gu, __val_gu, ptr);       \
                break;                                          \
        default:                                                \
                __get_user_bad();                               \
                break;                                          \
        }                                                       \
        (x) = (__force typeof(*(ptr)))__val_gu;                 \
        __ret_gu;                                               \
})
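
/*
 * Usage sketch (illustrative only, not part of this header): get_user()
 * evaluates to 0 on success or -EFAULT on a faulting access, and stores
 * the fetched value in its first argument:
 *
 *	int __user *uptr;	// hypothetical user pointer
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */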
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                 \
        asm volatile("call __put_user_" #size           \
                     : "=a" (ret)                       \
                     : "c" (ptr), "a" (x)               \
                     : "ebx")

#define put_user(x, ptr)                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
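
/*
 * put_user()/get_user() go through the out-of-line, address-checking
 * helpers, while __put_user()/__get_user() expand to the inline,
 * unchecked variants and rely on the caller having validated the pointer
 * (e.g. with a prior access_ok()).  Illustrative sketch only:
 *
 *	long __user *uptr;	// hypothetical, already validated
 *	long val = 42;
 *
 *	if (put_user(val, uptr))	// checked variant
 *		return -EFAULT;
 */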
#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
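
/*
 * Note: the __m() cast presumably exists so that the "m" operand handed
 * to the asm below covers a large region rather than a single word,
 * keeping gcc from making assumptions about how much memory the
 * instruction may touch.
 */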

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1: mov"itype" %"rtype"1,%2\n"                     \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3: mov %3,%0\n"                                   \
                     " jmp 2b\n"                                        \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
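
/*
 * How the fixup works: if the mov at label 1 faults, the exception table
 * entry emitted by _ASM_EXTABLE(1b, 3b) sends the page-fault handler to
 * label 3 in the .fixup section, which loads the errno argument into the
 * error output and jumps back to label 2, so the caller simply sees a
 * non-zero err instead of an oops.
 */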
#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1: mov"itype" %2,%"rtype"1\n"                     \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3: mov %3,%0\n"                                   \
                     " xor"itype" %"rtype"1,%"rtype"1\n"                \
                     " jmp 2b\n"                                        \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
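
/*
 * As elsewhere in the kernel, these copy routines return the number of
 * bytes that could NOT be copied; a return value of 0 means the whole
 * transfer succeeded.
 */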

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                return ret;
        case 2:
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                return ret;
        case 4:
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                return ret;
        case 8:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
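
/*
 * Usage sketch (illustrative only): __copy_from_user() skips the address
 * check, so callers are expected to have validated the range themselves;
 * buf, ubuf and len are hypothetical:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(buf, ubuf, len))
 *		return -EFAULT;
 */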

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                return ret;
        case 2:
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 4:
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                return ret;
        case 8:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                          (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                          (__force void *)src, size);
        }
}
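
/*
 * Note: for the constant sizes handled above, __copy_in_user() bounces
 * each word through a kernel temporary (tmp), pairing a __get_user_asm()
 * load with a __put_user_asm() store and bailing out on the first fault.
 */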

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}
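
/*
 * __copy_to_user_inatomic() above is just copy_user_generic() with no
 * access_ok() and no might_sleep(), so it is intended for callers that
 * cannot sleep and have already validated the destination range.
 */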

#define ARCH_HAS_NOCACHE_UACCESS 1

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
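
/*
 * Note: the zerorest argument presumably selects whether the unwritten
 * tail of the destination is zeroed when the copy faults, which is why
 * the plain __copy_from_user_nocache() below passes 1 and the inatomic
 * variant passes 0.
 */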

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */