/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, and
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x)							\
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b)	((a).ar4 == (b).ar4)
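
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily switching to KERNEL_DS so that the uaccess routines accept
 * kernel pointers.  Only get_fs(), set_fs() and KERNEL_DS above are real;
 * the surrounding code is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... call code that performs get_user()/copy_from_user() on a
 *	// kernel buffer ...
 *	set_fs(old_fs);
 *
 * On s390, set_fs() also reloads control register 7 with the user or
 * kernel ASCE (see __ctl_load() above), which is what actually switches
 * the address space used for "user" accesses.
 */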
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
};

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;

extern int __handle_fault(unsigned long, unsigned long, int);

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = uaccess.copy_to_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = uaccess.copy_from_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

extern int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
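
/*
 * Usage sketch (illustrative only): typical single-value transfers in a
 * syscall or ioctl handler.  "uptr" and "val" are hypothetical names.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))	// uptr is an int __user *
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * Both macros pick the transfer width from the pointer type and, per the
 * helpers above, evaluate to 0 on success or -EFAULT on a faulting access.
 */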
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_to_user_small(n, to, from);
	else
		return uaccess.copy_to_user(n, to, from);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
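
/*
 * Usage sketch (illustrative only): returning a kernel structure to user
 * space, e.g. from a read() or ioctl() handler.  "some_info", "info" and
 * "argp" are hypothetical.
 *
 *	struct some_info info = { ... };
 *
 *	if (copy_to_user(argp, &info, sizeof(info)))
 *		return -EFAULT;
 *	return 0;
 *
 * A non-zero return value is the number of bytes that could not be
 * copied, which callers conventionally map to -EFAULT.
 */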
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_from_user_small(n, from, to);
	else
		return uaccess.copy_from_user(n, from, to);
}

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
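
/*
 * Usage sketch (illustrative only): pulling a structure in from user
 * space, e.g. in a write() or ioctl() handler.  "some_request", "req"
 * and "argp" are hypothetical.
 *
 *	struct some_request req;
 *
 *	if (copy_from_user(&req, argp, sizeof(req)))
 *		return -EFAULT;
 *	// operate on req; on a partial copy the destination has been
 *	// zero-padded to sizeof(req), as documented above
 */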
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return uaccess.copy_in_user(n, to, from);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__access_ok(from, n) && __access_ok(to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

/*
 * Copy a null terminated string from userspace.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;

	might_fault();
	if (access_ok(VERIFY_READ, src, 1))
		res = uaccess.strncpy_from_user(count, src, dst);
	return res;
}

static inline unsigned long
strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return uaccess.strnlen_user(n, src);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
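
/*
 * Usage sketch (illustrative only): copying a NUL-terminated string from
 * user space into a fixed-size kernel buffer.  "name" and "uname" are
 * hypothetical, and the truncation check reflects the usual
 * strncpy_from_user() convention rather than anything specific to s390.
 *
 *	char name[64];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return -EFAULT;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// no NUL seen within the buffer
 *
 * strnlen_user()/strlen_user() can be used beforehand to size an
 * allocation; per the comment above, the count includes the terminating
 * NUL and an exception yields 0.
 */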
/*
 * Zero Userspace
 */
static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
	return uaccess.clear_user(n, to);
}

static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = uaccess.clear_user(n, to);
	return n;
}

extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#endif /* __S390_UACCESS_H */