uaccess_64.h

#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & \
			   (current_thread_info()->addr_limit.seg)))
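
/*
 * Usage sketch (illustrative, not part of the original header): the
 * classic get_fs()/set_fs() pattern for feeding a kernel buffer to code
 * that expects a __user pointer.  "read_fn" is a hypothetical callback;
 * like the macros above, this assumes mm_segment_t and
 * current_thread_info() are in scope at the point of use.
 */
static inline long __example_with_kernel_ds(long (*read_fn)(char __user *, unsigned long),
					    char *kbuf, unsigned long len)
{
	mm_segment_t old_fs = get_fs();	/* assumes <asm/thread_info.h> is visible */
	long ret;

	set_fs(KERNEL_DS);		/* bypass user-address checking */
	ret = read_fn((__force char __user *)kbuf, len);
	set_fs(old_fs);			/* always restore the caller's limit */
	return ret;
}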

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("# range_ok\n\r"						\
	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "g" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
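
/*
 * Illustrative C equivalent of the asm in __range_not_ok() above (not
 * part of the original header).  "limit" stands for
 * current_thread_info()->addr_limit.seg; the sum addr + size can carry
 * past 2^64, which is the "65-bit" case the sbbq instructions fold into
 * the flag.
 */
static inline unsigned long __example_range_not_ok(unsigned long addr,
						   unsigned long size,
						   unsigned long limit)
{
	unsigned long sum = addr + size;

	return (sum < addr) || (sum > limit);	/* nonzero => range is bad */
}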

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE
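
/*
 * Sketch (hypothetical, not part of the original header) of the lookup
 * the page-fault path performs on this table; the real search_extable()
 * works on a sorted table rather than doing a linear scan.
 */
static inline const struct exception_table_entry *
__example_search_extable(const struct exception_table_entry *first,
			 const struct exception_table_entry *last,
			 unsigned long faulting_rip)
{
	const struct exception_table_entry *e;

	for (e = first; e <= last; e++)
		if (e->insn == faulting_rip)
			return e;	/* fixup_exception() then resumes at e->fixup */
	return NULL;
}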

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "0" (ptr))

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)						\
({									\
	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
		break;							\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	(x) = (__force typeof(*(ptr)))__val_gu;				\
	__ret_gu;							\
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "d" (x)		\
		     : "r8")

#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	int __pu_err;					\
	typeof(*(ptr)) __user *__pu_addr = (ptr);	\
	switch (size) {					\
	case 1:						\
		__put_user_x(1, __pu_err, x, __pu_addr);\
		break;					\
	case 2:						\
		__put_user_x(2, __pu_err, x, __pu_addr);\
		break;					\
	case 4:						\
		__put_user_x(4, __pu_err, x, __pu_addr);\
		break;					\
	case 8:						\
		__put_user_x(8, __pu_err, x, __pu_addr);\
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
	__pu_err;					\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);		\
	(x) = (__force typeof(*(ptr)))__gu_val;				\
	__gu_err;							\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
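
/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical helper that reads a value with the checking get_user(),
 * doubles it, and writes it back with put_user().  Both return 0 on
 * success and -EFAULT on a fault.
 */
static inline int __example_double_in_place(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val * 2, uptr);
}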

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		return ret;
	case 2:
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		return ret;
	case 4:
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		return ret;
	case 8:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
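
/*
 * Usage sketch (illustrative, not part of the original header): with a
 * compile-time-constant size the switch above inlines the copy as a
 * single mov; only non-constant sizes call out to copy_user_generic().
 * The caller is assumed to have done the access_ok() check already.
 */
static inline int __example_fetch_u64(u64 *dst, const u64 __user *src)
{
	if (__copy_from_user(dst, src, sizeof(*src)))	/* nonzero = bytes not copied */
		return -EFAULT;
	return 0;
}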

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		return ret;
	case 2:
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 4:
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		return ret;
	case 8:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
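
/*
 * Mirror-image sketch for the store direction (illustrative, not part
 * of the original header); again the caller is assumed to have done
 * access_ok() on dst.
 */
static inline int __example_store_u32(u32 __user *dst, u32 val)
{
	return __copy_to_user(dst, &val, sizeof(val)) ? -EFAULT : 0;
}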

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);
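
/*
 * Usage sketch (illustrative, not part of the original header):
 * strncpy_from_user() returns the number of bytes copied (or -EFAULT)
 * and does not NUL-terminate when the buffer fills up, so a careful
 * caller terminates by hand.
 */
static inline long __example_fetch_name(char *kbuf, const char __user *uname,
					long bufsz)
{
	long len = strncpy_from_user(kbuf, uname, bufsz - 1);

	if (len >= 0)
		kbuf[len] = '\0';
	return len;
}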

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */