/* uaccess.h */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>

/* Direction flags for access_ok(): the kind of user access intended. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;	/* faulting insn address, continuation address */
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
extern int fixup_exception(struct pt_regs *regs);
/* Addressing-segment helpers: kernel segment and the current task's limit. */
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define segment_eq(a,b) ((a) == (b))

#include <asm/uaccess-asm.h>

/* True (1) when __range_ok() reports [addr, addr+size) is a valid user range. */
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
/*
 * Single-value transfer routines. They automatically use the right
 * size if we just have the right pointer type. Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value. This means zeroing out the destination variable
 * or buffer on error. Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path. When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success. Note that these
 * versions are void (ie, don't return a value as such).
 */
/* Out-of-line asm helpers; the _N suffix is the access size in bytes. */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_8(void *);
extern int __get_user_bad(void);

/*
 * Branch to the size-specific __get_user_N helper.  The user pointer
 * goes in via "%0" (tied to __p); the error code comes back in __e and
 * the fetched value in __r1.  __i... lists the additional registers the
 * helper clobbers.
 */
#define __get_user_x(__r1,__p,__e,__s,__i...) \
	__asm__ __volatile__ ("bl __get_user_" #__s \
		: "=&r" (__e), "=r" (__r1) \
		: "0" (__p) \
		: __i)
/*
 * get_user(x,p): fetch a simple value of 1, 2, 4 or 8 bytes from user
 * space via the out-of-line __get_user_N helpers, storing it in x.
 * Evaluates to the helper's error code (0 on success).  The register
 * pinning matches the helpers' calling convention: pointer in r0,
 * value returned in r1, error code returned in r0.
 */
#define get_user(x,p) \
	({ \
		const register typeof(*(p)) *__p asm("r0") = (p); \
		register typeof(*(p)) __r1 asm("r1"); \
		register int __e asm("r0"); \
		switch (sizeof(*(p))) { \
		case 1: \
			__get_user_x(__r1, __p, __e, 1, "lr"); \
			break; \
		case 2: \
			/* the 2-byte helper additionally clobbers r2 */ \
			__get_user_x(__r1, __p, __e, 2, "r2", "lr"); \
			break; \
		case 4: \
			__get_user_x(__r1, __p, __e, 4, "lr"); \
			break; \
		case 8: \
			__get_user_x(__r1, __p, __e, 8, "lr"); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
		x = __r1; \
		__e; \
	})
/*
 * __get_user(x,ptr): as get_user() but without the address-space check;
 * the caller must have validated ptr with access_ok() first.  Evaluates
 * to 0 on success or the error code set by __get_user_err().
 */
#define __get_user(x,ptr) \
	({ \
		long __gu_err = 0; \
		__get_user_err((x),(ptr),__gu_err); \
		__gu_err; \
	})

/*
 * __get_user_error(x,ptr,err): like __get_user() but stores the error
 * into the caller-supplied err lvalue and yields no value (void).
 */
#define __get_user_error(x,ptr,err) \
	({ \
		__get_user_err((x),(ptr),err); \
		(void) 0; \
	})
/*
 * Size-dispatched unchecked load from user space.  The __get_user_asm_*
 * helpers come from <asm/uaccess-asm.h> and update err on fault (per the
 * "xxx_error" contract above: EFAULT on error, unchanged on success).
 * The value is staged in an unsigned long and cast to the destination
 * type so partial/failed reads cannot leave x holding kernel data.
 * Note there is no 8-byte case here, unlike get_user().
 */
#define __get_user_err(x,ptr,err) \
	do { \
		unsigned long __gu_addr = (unsigned long)(ptr); \
		unsigned long __gu_val; \
		switch (sizeof(*(ptr))) { \
		case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
		case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
		case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
		default: (__gu_val) = __get_user_bad(); \
		} \
		(x) = (__typeof__(*(ptr)))__gu_val; \
	} while (0)
/* Out-of-line store helpers; the _N suffix is the access size in bytes. */
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);

/*
 * Branch to the size-specific __put_user_N helper: pointer in r0, value
 * in r1, error code returned in r0.  The __asmeq() checks statically
 * verify the operands really landed in the pinned registers.
 */
#define __put_user_x(__r1,__p,__e,__s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%2", "r1") \
		"bl __put_user_" #__s \
		: "=&r" (__e) \
		: "0" (__p), "r" (__r1) \
		: "ip", "lr", "cc")
/*
 * put_user(x,p): store a simple value of 1, 2, 4 or 8 bytes to user
 * space via the out-of-line __put_user_N helpers.  Evaluates to the
 * helper's error code (0 on success).  Register pinning matches
 * __put_user_x(): value in r1, pointer in r0, error back in r0.
 */
#define put_user(x,p) \
	({ \
		const register typeof(*(p)) __r1 asm("r1") = (x); \
		const register typeof(*(p)) *__p asm("r0") = (p); \
		register int __e asm("r0"); \
		switch (sizeof(*(__p))) { \
		case 1: \
			__put_user_x(__r1, __p, __e, 1); \
			break; \
		case 2: \
			__put_user_x(__r1, __p, __e, 2); \
			break; \
		case 4: \
			__put_user_x(__r1, __p, __e, 4); \
			break; \
		case 8: \
			__put_user_x(__r1, __p, __e, 8); \
			break; \
		default: __e = __put_user_bad(); break; \
		} \
		__e; \
	})
  143. #if 0
  144. /********************* OLD METHOD *******************/
  145. #define __put_user_x(__r1,__p,__e,__s,__i...) \
  146. __asm__ __volatile__ ("bl __put_user_" #__s \
  147. : "=&r" (__e) \
  148. : "0" (__p), "r" (__r1) \
  149. : __i)
  150. #define put_user(x,p) \
  151. ({ \
  152. const register typeof(*(p)) __r1 asm("r1") = (x); \
  153. const register typeof(*(p)) *__p asm("r0") = (p); \
  154. register int __e asm("r0"); \
  155. switch (sizeof(*(p))) { \
  156. case 1: \
  157. __put_user_x(__r1, __p, __e, 1, "r2", "lr"); \
  158. break; \
  159. case 2: \
  160. __put_user_x(__r1, __p, __e, 2, "r2", "lr"); \
  161. break; \
  162. case 4: \
  163. __put_user_x(__r1, __p, __e, 4, "r2", "lr"); \
  164. break; \
  165. case 8: \
  166. __put_user_x(__r1, __p, __e, 8, "r2", "ip", "lr"); \
  167. break; \
  168. default: __e = __put_user_bad(); break; \
  169. } \
  170. __e; \
  171. })
  172. /*************************************************/
  173. #endif
/*
 * __put_user(x,ptr): store a simple value to user space without the
 * address-space check; the caller must have validated ptr with
 * access_ok() first.  Evaluates to 0 on success or the error code set
 * by __put_user_err().
 */
#define __put_user(x,ptr) \
	({ \
		long __pu_err = 0; \
		__put_user_err((x),(ptr),__pu_err); \
		__pu_err; \
	})

/*
 * __put_user_error(x,ptr,err): like __put_user() but stores the error
 * into the caller-supplied err lvalue and yields no value (void).
 */
#define __put_user_error(x,ptr,err) \
	({ \
		__put_user_err((x),(ptr),err); \
		(void) 0; \
	})
/*
 * Size-dispatched unchecked store to user space.  The __put_user_asm_*
 * helpers come from <asm/uaccess-asm.h> and update err on fault (per
 * the "xxx_error" contract above).  Unlike __get_user_err() this also
 * supports an 8-byte (dword) store.
 */
#define __put_user_err(x,ptr,err) \
	do { \
		unsigned long __pu_addr = (unsigned long)(ptr); \
		__typeof__(*(ptr)) __pu_val = (x); \
		switch (sizeof(*(ptr))) { \
		case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
		case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
		case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
		case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
		default: __put_user_bad(); \
		} \
	} while (0)
  197. static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n)
  198. {
  199. if (access_ok(VERIFY_READ, from, n))
  200. __do_copy_from_user(to, from, n);
  201. else /* security hole - plug it */
  202. memzero(to, n);
  203. return n;
  204. }
/*
 * Unchecked variant of copy_from_user(); the caller must have validated
 * the source range with access_ok() beforehand.
 * NOTE(review): __do_copy_from_user() appears to update n in place
 * (it is returned afterwards) — confirm against <asm/uaccess-asm.h>.
 */
static __inline__ unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
{
	__do_copy_from_user(to, from, n);
	return n;
}
  210. static __inline__ unsigned long copy_to_user(void *to, const void *from, unsigned long n)
  211. {
  212. if (access_ok(VERIFY_WRITE, to, n))
  213. __do_copy_to_user(to, from, n);
  214. return n;
  215. }
/*
 * Unchecked variant of copy_to_user(); the caller must have validated
 * the destination range with access_ok() beforehand.
 */
static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsigned long n)
{
	__do_copy_to_user(to, from, n);
	return n;
}
/* The *_inatomic variants are plain aliases of the unchecked copies here. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
  223. static __inline__ unsigned long clear_user (void *to, unsigned long n)
  224. {
  225. if (access_ok(VERIFY_WRITE, to, n))
  226. __do_clear_user(to, n);
  227. return n;
  228. }
/*
 * Unchecked variant of clear_user(); the caller must have validated
 * the range with access_ok() beforehand.
 */
static __inline__ unsigned long __clear_user (void *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
  234. static __inline__ long strncpy_from_user (char *dst, const char *src, long count)
  235. {
  236. long res = -EFAULT;
  237. if (access_ok(VERIFY_READ, src, 1))
  238. __do_strncpy_from_user(dst, src, count, res);
  239. return res;
  240. }
/*
 * Unchecked variant of strncpy_from_user(); the caller must have
 * validated src with access_ok() beforehand.  res is deliberately left
 * uninitialized — __do_strncpy_from_user() is expected to set it on
 * every path.
 */
static __inline__ long __strncpy_from_user (char *dst, const char *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
  247. #define strlen_user(s) strnlen_user(s, ~0UL >> 1)
  248. static inline long strnlen_user(const char *s, long n)
  249. {
  250. unsigned long res = 0;
  251. if (__addr_ok(s))
  252. __do_strnlen_user(s, n, res);
  253. return res;
  254. }
  255. #endif /* _ASMARM_UACCESS_H */