uaccess.h

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_UACCESS_H
#define __ASM_AVR32_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

typedef struct {
        unsigned int is_user_space;
} mm_segment_t;

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space)

#define USER_ADDR_LIMIT 0x80000000

#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)

#define get_ds() (KERNEL_DS)

static inline mm_segment_t get_fs(void)
{
        return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}

static inline void set_fs(mm_segment_t s)
{
        if (s.is_user_space)
                set_thread_flag(TIF_USERSPACE);
        else
                clear_thread_flag(TIF_USERSPACE);
}
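
/*
 * Illustrative sketch (not part of the original header): the classic
 * save/override/restore pattern for calling code that expects a __user
 * pointer with a kernel buffer. Only get_fs(), set_fs() and KERNEL_DS
 * come from this file; do_user_io(), kbuf and len are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);                      (bypass user-address checking)
 *      ret = do_user_io((char __user *)kbuf, len);
 *      set_fs(old_fs);                         (always restore the old limit)
 */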

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * We do the following checks:
 *   1. Is the access from kernel space?
 *   2. Does (addr + size) set the carry bit?
 *   3. Is (addr + size) a negative number (i.e. >= 0x80000000)?
 *
 * If yes on the first check, access is granted.
 * If yes on any of the others, access is denied.
 */
#define __range_ok(addr, size) \
        (test_thread_flag(TIF_USERSPACE) \
         && (((unsigned long)(addr) >= 0x80000000) \
             || ((unsigned long)(size) > 0x80000000) \
             || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000)))

#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))

static inline int
verify_area(int type, const void __user *addr, unsigned long size)
{
        return access_ok(type, addr, size) ? 0 : -EFAULT;
}
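
/*
 * Worked example of the checks above (values are illustrative): with the
 * user segment selected (TIF_USERSPACE set),
 *
 *      access_ok(VERIFY_READ, 0x7ffff000, 0x1000)  -> granted
 *              (0x7ffff000 + 0x1000 == 0x80000000, last byte still below
 *               USER_ADDR_LIMIT)
 *      access_ok(VERIFY_READ, 0x7ffff000, 0x2000)  -> denied
 *              (0x7ffff000 + 0x2000 == 0x80001000, crosses USER_ADDR_LIMIT)
 *      access_ok(VERIFY_READ, 0x90000000, 4)       -> denied
 *              (the address itself is above USER_ADDR_LIMIT)
 *
 * With KERNEL_DS in force (TIF_USERSPACE clear), __range_ok() is always 0
 * and every range is accepted.
 */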

/* Generic arbitrary sized copy. Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from,
                                   __kernel_size_t n);

extern __kernel_size_t copy_to_user(void __user *to, const void *from,
                                    __kernel_size_t n);
extern __kernel_size_t copy_from_user(void *to, const void __user *from,
                                      __kernel_size_t n);

static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
                                             __kernel_size_t n)
{
        return __copy_user((void __force *)to, from, n);
}

static inline __kernel_size_t __copy_from_user(void *to,
                                               const void __user *from,
                                               __kernel_size_t n)
{
        return __copy_user(to, (const void __force *)from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
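
/*
 * Illustrative sketch (not part of the original header): because these
 * routines return the number of bytes that could NOT be copied, a zero
 * return means success and anything else is normally turned into -EFAULT
 * by the caller. struct foo_args, karg and uarg are hypothetical.
 *
 *      struct foo_args karg;
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 *      ...
 *      if (copy_to_user(uarg, &karg, sizeof(karg)))
 *              return -EFAULT;
 */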

/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x),(ptr),sizeof(*(ptr)))
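
/*
 * Illustrative sketch (not part of the original header): put_user() takes
 * the value first and the user pointer second, and the access size is
 * derived from the pointer's type. "status" and "statusp" are hypothetical.
 *
 *      int status = 0;
 *      int __user *statusp = ...;
 *
 *      if (put_user(status, statusp))
 *              return -EFAULT;
 */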

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x),(ptr),sizeof(*(ptr)))
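
/*
 * Illustrative sketch (not part of the original header): get_user() reads a
 * value whose size is taken from the pointer's type; on error the
 * destination is zeroed and -EFAULT is returned. "val" and "uptr" are
 * hypothetical.
 *
 *      unsigned int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 */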

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
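
/*
 * Illustrative sketch (not part of the original header): the
 * double-underscore variants skip the access_ok() check, so the caller
 * performs it once up front, typically when transferring several fields
 * of the same user structure. "uarg", "val1" and "val2" are hypothetical.
 *
 *      if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
 *              return -EFAULT;
 *      err  = __put_user(val1, &uarg->field1);
 *      err |= __put_user(val2, &uarg->field2);
 *      if (err)
 *              return -EFAULT;
 */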

extern int __get_user_bad(void);
extern int __put_user_bad(void);

#define __get_user_nocheck(x, ptr, size) \
({ \
        typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
        int __gu_err = 0; \
 \
        switch (size) { \
        case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \
        case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \
        case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \
        case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \
        default: __gu_err = __get_user_bad(); break; \
        } \
 \
        x = __gu_val; \
        __gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
        typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
        const typeof(*(ptr)) __user *__gu_addr = (ptr); \
        int __gu_err = 0; \
 \
        if (access_ok(VERIFY_READ, __gu_addr, size)) { \
                switch (size) { \
                case 1: \
                        __get_user_asm("ub", __gu_val, __gu_addr, __gu_err); \
                        break; \
                case 2: \
                        __get_user_asm("uh", __gu_val, __gu_addr, __gu_err); \
                        break; \
                case 4: \
                        __get_user_asm("w", __gu_val, __gu_addr, __gu_err); \
                        break; \
                case 8: \
                        __get_user_asm("d", __gu_val, __gu_addr, __gu_err); \
                        break; \
                default: \
                        __gu_err = __get_user_bad(); \
                        break; \
                } \
        } else { \
                __gu_err = -EFAULT; \
        } \
        x = __gu_val; \
        __gu_err; \
})

#define __get_user_asm(suffix, __gu_val, ptr, __gu_err) \
        asm volatile( \
                "1:     ld." suffix "   %1, %3\n" \
                "2:\n" \
                "       .section .fixup, \"ax\"\n" \
                "3:     mov     %0, %4\n" \
                "       rjmp    2b\n" \
                "       .previous\n" \
                "       .section __ex_table, \"a\"\n" \
                "       .long   1b, 3b\n" \
                "       .previous\n" \
                : "=r"(__gu_err), "=r"(__gu_val) \
                : "0"(__gu_err), "m"(*(ptr)), "i"(-EFAULT))

#define __put_user_nocheck(x, ptr, size) \
({ \
        typeof(*(ptr)) __pu_val; \
        int __pu_err = 0; \
 \
        __pu_val = (x); \
        switch (size) { \
        case 1: __put_user_asm("b", ptr, __pu_val, __pu_err); break; \
        case 2: __put_user_asm("h", ptr, __pu_val, __pu_err); break; \
        case 4: __put_user_asm("w", ptr, __pu_val, __pu_err); break; \
        case 8: __put_user_asm("d", ptr, __pu_val, __pu_err); break; \
        default: __pu_err = __put_user_bad(); break; \
        } \
        __pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
        typeof(*(ptr)) __pu_val; \
        typeof(*(ptr)) __user *__pu_addr = (ptr); \
        int __pu_err = 0; \
 \
        __pu_val = (x); \
        if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
                switch (size) { \
                case 1: \
                        __put_user_asm("b", __pu_addr, __pu_val, __pu_err); \
                        break; \
                case 2: \
                        __put_user_asm("h", __pu_addr, __pu_val, __pu_err); \
                        break; \
                case 4: \
                        __put_user_asm("w", __pu_addr, __pu_val, __pu_err); \
                        break; \
                case 8: \
                        __put_user_asm("d", __pu_addr, __pu_val, __pu_err); \
                        break; \
                default: \
                        __pu_err = __put_user_bad(); \
                        break; \
                } \
        } else { \
                __pu_err = -EFAULT; \
        } \
        __pu_err; \
})

#define __put_user_asm(suffix, ptr, __pu_val, __gu_err) \
        asm volatile( \
                "1:     st." suffix "   %1, %3\n" \
                "2:\n" \
                "       .section .fixup, \"ax\"\n" \
                "3:     mov     %0, %4\n" \
                "       rjmp    2b\n" \
                "       .previous\n" \
                "       .section __ex_table, \"a\"\n" \
                "       .long   1b, 3b\n" \
                "       .previous\n" \
                : "=r"(__gu_err), "=m"(*(ptr)) \
                : "0"(__gu_err), "r"(__pu_val), "i"(-EFAULT))

extern __kernel_size_t clear_user(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);

extern long strnlen_user(const char __user *__s, long __n);
extern long __strnlen_user(const char __user *__s, long __n);

#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
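
/*
 * Illustrative sketch (not part of the original header): by the usual kernel
 * convention, strncpy_from_user() returns the length of the copied string
 * (excluding the trailing NUL) or a negative error on fault, and a return
 * equal to count means the source did not fit and the destination may not
 * be NUL-terminated. "name" and "uname" are hypothetical.
 *
 *      char name[32];
 *      long len;
 *
 *      len = strncpy_from_user(name, uname, sizeof(name));
 *      if (len < 0)
 *              return -EFAULT;
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;
 */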

struct exception_table_entry
{
        unsigned long insn, fixup;
};
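
/*
 * Note on how the table is used (derived from the inline asm above): each
 * __ex_table entry pairs the address of a user-space load/store ("insn",
 * label 1 in the asm) with a recovery address ("fixup", label 3). When such
 * an access faults, the page fault handler looks the faulting PC up in the
 * table and, on a match, resumes at the fixup code, which writes -EFAULT
 * into the error variable and jumps back past the access (label 2).
 * Conceptually:
 *
 *      const struct exception_table_entry *e = lookup(regs->pc);
 *      if (e)
 *              regs->pc = e->fixup;    (skip the faulting access)
 *      else
 *              ...                     (genuine kernel fault)
 *
 * lookup() stands in for the kernel's exception-table search and is not
 * defined in this header.
 */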

#endif /* __ASM_AVR32_UACCESS_H */