#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &			\
			   (current_thread_info()->addr_limit.seg)))
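/*
 * Illustrative example (not part of the original header): the classic
 * pattern for letting kernel code hand a kernel buffer to a routine
 * that expects a __user pointer. With addr_limit raised to KERNEL_DS,
 * the range checks below accept kernel addresses. The surrounding
 * function and the kbuf/file/pos variables here are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */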
/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
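/*
 * How the check above works, step by step (%1 = addr, %3 = size,
 * %4 = addr_limit.seg, %0 = flag; AT&T operand order):
 *
 *	add %3,%1	roksum = addr + size, CF = 1 if the sum wrapped
 *	sbb %0,%0	flag = CF ? -1 : 0 (record the 64-bit carry)
 *	cmp %1,%4	CF = 1 if addr_limit < roksum
 *	sbb $0,%0	flag -= CF
 *
 * So flag is nonzero iff addr + size either wrapped past 2^64 or ran
 * past addr_limit -- the carry is the "65th bit". Typical use
 * (illustrative; ubuf and len are hypothetical):
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	... multiple __get_user()/__copy_from_user() calls on ubuf ...
 */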
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE
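/*
 * Simplified sketch (assumed, for illustration only) of the lookup a
 * fixup_exception() implementation performs on a fault: binary-search
 * the sorted table for the faulting instruction address; on a hit the
 * trap handler resumes execution at mid->fixup. The function name is
 * hypothetical.
 *
 *	static const struct exception_table_entry *
 *	search_extable_sketch(const struct exception_table_entry *first,
 *			      const struct exception_table_entry *last,
 *			      unsigned long ip)
 *	{
 *		while (first <= last) {
 *			const struct exception_table_entry *mid =
 *				first + (last - first) / 2;
 *			if (ip < mid->insn)
 *				last = mid - 1;
 *			else if (ip > mid->insn)
 *				first = mid + 1;
 *			else
 *				return mid;
 *		}
 *		return NULL;
 *	}
 */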
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "0" (ptr))

/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
#define get_user(x, ptr)						\
({									\
	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
		break;							\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	(x) = (__force typeof(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
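/*
 * Typical use (illustrative; uaddr is hypothetical): get_user()
 * returns 0 on success or -EFAULT on a bad address, and stores the
 * fetched value in its first argument.
 *
 *	int __user *uaddr;
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */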
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "a" (x)		\
		     : "ebx")

#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
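/*
 * Illustrative contrast (st, a and b are hypothetical): put_user()
 * performs the access_ok() range check itself, while __put_user()
 * assumes the caller already did it -- which pays off when storing
 * several fields into the same user structure.
 *
 *	if (!access_ok(VERIFY_WRITE, st, sizeof(*st)))
 *		return -EFAULT;
 *	err  = __put_user(a, &st->a);
 *	err |= __put_user(b, &st->b);
 *	return err ? -EFAULT : 0;
 */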
#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	int __pu_err;					\
	typeof(*(ptr)) __user *__pu_addr = (ptr);	\
	switch (size) {					\
	case 1:						\
		__put_user_x(1, __pu_err, x, __pu_addr);\
		break;					\
	case 2:						\
		__put_user_x(2, __pu_err, x, __pu_addr);\
		break;					\
	case 4:						\
		__put_user_x(4, __pu_err, x, __pu_addr);\
		break;					\
	case 8:						\
		__put_user_x(8, __pu_err, x, __pu_addr);\
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
	__pu_err;					\
})
#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)
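/*
 * Worked example (illustrative): for a 2-byte store the switch above
 * picks the size == 2 arm, which expands to
 *
 *	__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);
 *
 * i.e. a single "movw" instruction plus an exception-table entry that
 * sets retval to -EFAULT if the store faults.
 */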
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
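/*
 * Rough shape of what the template above emits for the byte case
 * (registers are chosen by the compiler; <x>, <addr> and <err> stand
 * for the constrained operands):
 *
 *	1:	movb <x>,<addr>		the store that may fault
 *	2:				normal path continues here
 *	.section .fixup,"ax"
 *	3:	mov $-EFAULT,<err>	fault path, out of line
 *		jmp 2b
 *	.previous
 *	_ASM_EXTABLE(1b, 3b)		maps a fault at 1b to fixup 3b
 */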
#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i" (errno), "0" (err))
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
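/*
 * Typical use (illustrative; kbuf, ubuf and len are hypothetical):
 * these routines return the number of bytes that could NOT be copied,
 * so zero means complete success.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */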
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	/* last __get_user_asm() argument: bytes reported uncopied on fault */
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
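/*
 * Because the switch above tests a compile-time constant, a call such
 * as (illustrative; struct pair and usrc are hypothetical)
 *
 *	struct pair { u64 lo, hi; } p;
 *	err = __copy_from_user(&p, usrc, sizeof(p));
 *
 * hits the 16-byte case and compiles down to two inline "movq" loads,
 * with no call out to copy_user_generic().
 */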
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");	/* compiler barrier between the two stores */
		__put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
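/*
 * Illustrative: __copy_in_user() moves data between two user-space
 * buffers by bouncing each access through a kernel temporary -- handy
 * e.g. for compat code repacking structures in user memory. A sketch
 * (old_ubuf, new_ubuf and struct foo are hypothetical):
 *
 *	if (copy_in_user(new_ubuf, old_ubuf, sizeof(struct foo)))
 *		return -EFAULT;
 */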
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
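/*
 * Typical use (illustrative; uname is hypothetical):
 * strncpy_from_user() returns the length of the string copied
 * (excluding the terminating NUL), count if it was truncated, or
 * -EFAULT on a bad address.
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (n < 0)
 *		return n;
 *	name[n] = '\0';
 */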
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
#define ARCH_HAS_NOCACHE_UACCESS 1

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
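/*
 * Illustrative note: the nocache variants use non-temporal stores, so
 * a large one-shot copy (say, into a scratch buffer about to be handed
 * to a device) does not evict useful cache lines. The zerorest
 * argument selects whether the rest of dst is zeroed after a fault:
 * the sleeping variant zeroes, the inatomic one does not. Sketch
 * (dst, usrc and len are hypothetical):
 *
 *	left = __copy_from_user_nocache(dst, usrc, len);
 *	if (left)
 *		return -EFAULT;
 */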
#endif /* __X86_64_UACCESS_H */