uaccess.h

#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.fs)
#define set_fs(val)     (current->thread.fs = (val))

#define segment_eq(a, b)        ((a).seg == (b).seg)
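
/*
 * Illustrative sketch (not part of this header): kernel code that needs to
 * pass a kernel buffer to a routine expecting a __user pointer has
 * traditionally widened the "segment" around the call and restored it
 * afterwards.  The routine and buffer names below are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *      long err;
 *
 *      set_fs(KERNEL_DS);
 *      err = some_user_copying_routine((void __user *)kbuf, len);
 *      set_fs(old_fs);
 */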
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses
 */
#define __access_ok(addr, size, segment) \
        (((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment) \
        (((addr) <= (segment).seg) && \
         (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size) \
        (__chk_user_ptr(addr), \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))
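
/*
 * Usage sketch (hypothetical names): access_ok() only validates the range
 * against the current segment limit; it does not touch the memory, so a
 * later access can still fault and must be handled by the copy routines.
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 */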
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn;
        unsigned long fixup;
};
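
/*
 * Simplified sketch (the real logic lives in the fault-handling paths, not
 * in this header): when an instruction listed in the table faults, the
 * handler looks the faulting address up and redirects execution to the
 * matching fixup address.
 *
 *      const struct exception_table_entry *entry;
 *
 *      entry = search_exception_tables(regs->nip);
 *      if (entry) {
 *              regs->nip = entry->fixup;
 *              return;
 *      }
 */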
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets down cast to a long.
 * The "put_user" functions already handle 64-bit data properly but we add
 * "user64" versions for completeness
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
        __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
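
/*
 * Usage sketch (hypothetical driver, not part of this header): single-value
 * transfers in an ioctl handler.  get_user()/put_user() verify the pointer
 * themselves and return 0 or -EFAULT; the "__" variants rely on an earlier
 * access_ok() when the same range is touched repeatedly.
 *
 *      int value;
 *
 *      if (get_user(value, (int __user *)arg))
 *              return -EFAULT;
 *      dev->threshold = value;
 *
 *      if (put_user(dev->status, (int __user *)arg))
 *              return -EFAULT;
 */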
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op) \
        __asm__ __volatile__( \
                "1: " op " %1,0(%2) # put_user\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3: li %0,%3\n" \
                " b 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                " .balign %5\n" \
                PPC_LONG "1b,3b\n" \
                ".previous" \
                : "=r" (err) \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err), \
                  "i" (sizeof(unsigned long)))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval) \
        __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err) \
        __asm__ __volatile__( \
                "1: stw %1,0(%2)\n" \
                "2: stw %1+1,4(%2)\n" \
                "3:\n" \
                ".section .fixup,\"ax\"\n" \
                "4: li %0,%3\n" \
                " b 3b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                " .balign %5\n" \
                PPC_LONG "1b,4b\n" \
                PPC_LONG "2b,4b\n" \
                ".previous" \
                : "=r" (err) \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err), \
                  "i" (sizeof(unsigned long)))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval) \
do { \
        retval = 0; \
        switch (size) { \
        case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
        case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
        case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
        case 8: __put_user_asm2(x, ptr, retval); break; \
        default: __put_user_bad(); \
        } \
} while (0)

#define __put_user_nocheck(x, ptr, size) \
({ \
        long __pu_err; \
        might_sleep(); \
        __chk_user_ptr(ptr); \
        __put_user_size((x), (ptr), (size), __pu_err); \
        __pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
        long __pu_err = -EFAULT; \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
        might_sleep(); \
        if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
                __put_user_size((x), __pu_addr, (size), __pu_err); \
        __pu_err; \
})
extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op) \
        __asm__ __volatile__( \
                "1: " op " %1,0(%2) # get_user\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "3: li %0,%3\n" \
                " li %1,0\n" \
                " b 2b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                " .balign %5\n" \
                PPC_LONG "1b,3b\n" \
                ".previous" \
                : "=r" (err), "=r" (x) \
                : "b" (addr), "i" (-EFAULT), "0" (err), \
                  "i" (sizeof(unsigned long)))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err) \
        __asm__ __volatile__( \
                "1: lwz %1,0(%2)\n" \
                "2: lwz %1+1,4(%2)\n" \
                "3:\n" \
                ".section .fixup,\"ax\"\n" \
                "4: li %0,%3\n" \
                " li %1,0\n" \
                " li %1+1,0\n" \
                " b 3b\n" \
                ".previous\n" \
                ".section __ex_table,\"a\"\n" \
                " .balign %5\n" \
                PPC_LONG "1b,4b\n" \
                PPC_LONG "2b,4b\n" \
                ".previous" \
                : "=r" (err), "=&r" (x) \
                : "b" (addr), "i" (-EFAULT), "0" (err), \
                  "i" (sizeof(unsigned long)))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval) \
do { \
        retval = 0; \
        __chk_user_ptr(ptr); \
        if (size > sizeof(x)) \
                (x) = __get_user_bad(); \
        switch (size) { \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
        case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
        case 8: __get_user_asm2(x, ptr, retval); break; \
        default: (x) = __get_user_bad(); \
        } \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
        long __gu_err; \
        unsigned long __gu_val; \
        __chk_user_ptr(ptr); \
        might_sleep(); \
        __get_user_size(__gu_val, (ptr), (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size) \
({ \
        long __gu_err; \
        long long __gu_val; \
        __chk_user_ptr(ptr); \
        might_sleep(); \
        __get_user_size(__gu_val, (ptr), (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size) \
({ \
        long __gu_err = -EFAULT; \
        unsigned long __gu_val = 0; \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
        might_sleep(); \
        if (access_ok(VERIFY_READ, __gu_addr, (size))) \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})
/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
                const const void __user *from, unsigned long size);

#ifndef __powerpc64__

extern inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long over;

        if (access_ok(VERIFY_READ, from, n))
                return __copy_tofrom_user((__force void __user *)to, from, n);
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
        return n;
}

extern inline unsigned long copy_to_user(void __user *to,
                const void *from, unsigned long n)
{
        unsigned long over;

        if (access_ok(VERIFY_WRITE, to, n))
                return __copy_tofrom_user(to, (__force void __user *)from, n);
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
        return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
        __copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
                unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
                unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
                unsigned long n);

#endif /* __powerpc64__ */
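
/*
 * Usage sketch (hypothetical structure and names): the bulk copy routines
 * return the number of bytes that could NOT be copied, so 0 means success.
 *
 *      struct foo_config cfg;
 *
 *      if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
 *              return -EFAULT;
 *
 *      if (copy_to_user((void __user *)arg, &cfg, sizeof(cfg)))
 *              return -EFAULT;
 */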
static inline unsigned long __copy_from_user_inatomic(void *to,
                const void __user *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                /*
                 * Start non-zero so sizes the switch does not handle
                 * (3, 5, 6, 7) fall through to the full copy below.
                 */
                unsigned long ret = 1;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
        return __copy_tofrom_user((__force void __user *)to, from, n);
}
static inline unsigned long __copy_to_user_inatomic(void __user *to,
                const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                /*
                 * Start non-zero so sizes the switch does not handle
                 * (3, 5, 6, 7) fall through to the full copy below.
                 */
                unsigned long ret = 1;

                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
                        __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
static inline unsigned long __copy_from_user(void *to,
                const void __user *from, unsigned long size)
{
        might_sleep();
        return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
                const void *from, unsigned long size)
{
        might_sleep();
        return __copy_to_user_inatomic(to, from, size);
}
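
/*
 * Note / sketch (placeholder names): the _inatomic variants above are
 * identical to __copy_from_user()/__copy_to_user() except that they skip
 * might_sleep(), so they are the ones to use when a copy must be attempted
 * from a context that cannot sleep.  A non-zero return is the number of
 * bytes left uncopied, and the caller must handle that case itself.
 *
 *      left = __copy_from_user_inatomic(kaddr + offset, ubuf, bytes);
 *      if (left)
 *              return -EFAULT;
 */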
extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        might_sleep();
        if (likely(access_ok(VERIFY_WRITE, addr, size)))
                return __clear_user(addr, size);
        if ((unsigned long)addr < TASK_SIZE) {
                unsigned long over = (unsigned long)addr + size - TASK_SIZE;
                return __clear_user(addr, size - over) + over;
        }
        return size;
}
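
/*
 * Usage sketch (hypothetical names): zero the tail of a user buffer, e.g.
 * when a read produced less data than was asked for.  A non-zero return is
 * the number of bytes that could not be cleared.
 *
 *      if (copied < len && clear_user(buf + copied, len - copied))
 *              return -EFAULT;
 */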
extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
                long count)
{
        might_sleep();
        if (likely(access_ok(VERIFY_READ, src, 1)))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}
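
/*
 * Usage sketch (hypothetical names): copy a NUL-terminated name from user
 * space into a fixed kernel buffer.  The return value is the length of the
 * copied string, or negative on an inaccessible source; a return equal to
 * the buffer size means the string did not fit and is not terminated.
 *
 *      char name[32];
 *      long n = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (n < 0)
 *              return -EFAULT;
 *      if (n == sizeof(name))
 *              return -ENAMETOOLONG;
 */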
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
        unsigned long top = current->thread.fs.seg;

        if ((unsigned long)str > top)
                return 0;
        return __strnlen_user(str, len, top);
}

#define strlen_user(str)        strnlen_user((str), 0x7ffffffe)

#endif  /* __ASSEMBLY__ */
#endif  /* __KERNEL__ */

#endif  /* _ARCH_POWERPC_UACCESS_H */