#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

extern int __get_kernel_bad(void);
extern int __get_user_bad(void);
extern int __put_kernel_bad(void);
extern int __put_user_bad(void);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline int __range_not_ok(unsigned long addr, unsigned long size,
				 unsigned long limit)
{
	unsigned long __newaddr = addr + size;

	return (__newaddr < addr || __newaddr > limit || size > limit);
}
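
/*
 * Worked example (illustrative, not part of the original source): with a
 * 32-bit unsigned long and limit = 0xc0000000,
 * __range_not_ok(0xfffffff0, 0x20, 0xc0000000) returns nonzero because
 * addr + size wraps around to 0x10, so the __newaddr < addr test catches
 * the overflow.
 */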

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
(	__chk_user_ptr(addr),						\
	!__range_not_ok((unsigned long) (__force void *) (addr),	\
			size, user_addr_max())				\
)
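
/*
 * Typical usage (sketch; "ubuf" and "len" are placeholder names):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */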

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr)		__get_kernel_bad();
#define LDD_USER(ptr)		__get_user_bad();
#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#define ASM_WORD_INSN		".word\t"
#else
#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd", ptr)
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_KERNEL(x, ptr)	__put_kernel_asm("std", x, ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#define ASM_WORD_INSN		".dword\t"
#endif

/*
 * The exception table contains two values: the first is an address
 * for an instruction that is allowed to fault, and the second is
 * the address of the fixup routine. Even on a 64-bit kernel we could
 * use a 32-bit (unsigned int) address here.
 */
struct exception_table_entry {
	unsigned long insn;	/* address of insn that is allowed to fault. */
	unsigned long fixup;	/* fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
	".section __ex_table,\"aw\"\n"				\
	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t"	\
	".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_space;
	unsigned long fault_addr;
};

#define __get_user(x, ptr)					\
({								\
	register long __gu_err __asm__ ("r8") = 0;		\
	register long __gu_val __asm__ ("r9") = 0;		\
								\
	if (segment_eq(get_fs(), KERNEL_DS)) {			\
		switch (sizeof(*(ptr))) {			\
		case 1: __get_kernel_asm("ldb", ptr); break;	\
		case 2: __get_kernel_asm("ldh", ptr); break;	\
		case 4: __get_kernel_asm("ldw", ptr); break;	\
		case 8: LDD_KERNEL(ptr); break;			\
		default: __get_kernel_bad(); break;		\
		}						\
	} else {						\
		switch (sizeof(*(ptr))) {			\
		case 1: __get_user_asm("ldb", ptr); break;	\
		case 2: __get_user_asm("ldh", ptr); break;	\
		case 4: __get_user_asm("ldw", ptr); break;	\
		case 8: LDD_USER(ptr); break;			\
		default: __get_user_bad(); break;		\
		}						\
	}							\
								\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
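
/*
 * Usage sketch ("uptr" is a placeholder __user pointer):
 *
 *	u32 val;
 *
 *	if (__get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */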

#define __get_kernel_asm(ldx, ptr)				\
	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1) \
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#define __get_user_asm(ldx, ptr)				\
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1) \
		: "=r"(__gu_val), "=r"(__gu_err)		\
		: "r"(ptr), "1"(__gu_err)			\
		: "r1");

#define __put_user(x, ptr)					\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	if (segment_eq(get_fs(), KERNEL_DS)) {			\
		switch (sizeof(*(ptr))) {			\
		case 1: __put_kernel_asm("stb", __x, ptr); break; \
		case 2: __put_kernel_asm("sth", __x, ptr); break; \
		case 4: __put_kernel_asm("stw", __x, ptr); break; \
		case 8: STD_KERNEL(__x, ptr); break;		\
		default: __put_kernel_bad(); break;		\
		}						\
	} else {						\
		switch (sizeof(*(ptr))) {			\
		case 1: __put_user_asm("stb", __x, ptr); break;	\
		case 2: __put_user_asm("sth", __x, ptr); break;	\
		case 4: __put_user_asm("stw", __x, ptr); break;	\
		case 8: STD_USER(__x, ptr); break;		\
		default: __put_user_bad(); break;		\
		}						\
	}							\
								\
	__pu_err;						\
})
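
/*
 * Usage sketch ("uptr" is a placeholder __user pointer):
 *
 *	if (__put_user(0x1234, (u32 __user *)uptr))
 *		return -EFAULT;
 */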

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */
#define __put_kernel_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"\n1:\t" stx "\t%2,0(%1)\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1) \
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err)		\
		: "r1")

#define __put_user_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1) \
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err)		\
		: "r1")

#if !defined(CONFIG_64BIT)

#define __put_kernel_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"\n1:\tstw %2,0(%1)"				\
		"\n2:\tstw %R2,4(%1)\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2) \
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1) \
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err)		\
		: "r1");					\
} while (0)

#define __put_user_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"\n1:\tstw %2,0(%%sr3,%1)"			\
		"\n2:\tstw %R2,4(%%sr3,%1)\n\t"			\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2) \
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1) \
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err)		\
		: "r1");					\
} while (0)

#endif /* !defined(CONFIG_64BIT) */
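
/*
 * Note on the fixup pairing in the 64-bit store macros above (added
 * explanation): if the first stw faults, no word has been written and
 * the fixup must skip both stores (fixup_put_user_skip_2); if only the
 * second stw faults, the low word has already been stored and a single
 * store remains to skip (fixup_put_user_skip_1).
 */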

/*
 * Complex access routines -- external declarations
 */
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#ifdef CONFIG_COMPAT
#define user_addr_max() (TASK_SIZE)
#else
#define user_addr_max() (DEFAULT_TASK_SIZE)
#endif

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
	__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long __must_check copy_from_user(void *to,
					const void __user *from,
					unsigned long n)
{
	int sz = __compiletime_object_size(to);
	int ret = -EFAULT;

	if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
		ret = __copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return ret;
}
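
/*
 * Usage sketch ("struct foo" and "ubuf" are placeholder names):
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */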

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */