#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <asm-generic/uaccess.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a,b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().  We just let
 * the page fault handler do the right thing.  This also means that
 * put_user is the same as __put_user, etc.
 */
extern int __get_kernel_bad(void);
extern int __get_user_bad(void);
extern int __put_kernel_bad(void);
extern int __put_user_bad(void);

static inline long access_ok(int type, const void __user *addr,
			     unsigned long size)
{
	return 1;
}
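/*
 * For example, access_ok(VERIFY_READ, (const void __user *)-1, 4)
 * still returns 1 here; a bogus pointer is only caught when the
 * access itself faults and the fixup routine turns it into -EFAULT.
 */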
#define verify_area(type,addr,size) (0)	/* FIXME: all users should go away soon,
					 * and use access_ok instead, then this
					 * should be removed. */
#define put_user __put_user
#define get_user __get_user
#if BITS_PER_LONG == 32
#define LDD_KERNEL(ptr)		__get_kernel_bad();
#define LDD_USER(ptr)		__get_user_bad();
#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x,ptr)
#else
#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr)		__get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr)	__put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr)	__put_user_asm("std",x,ptr)
#endif
/*
 * The exception table contains two values: the first is the address
 * of an instruction that is allowed to fault, and the second is the
 * address of the fixup routine.
 */

struct exception_table_entry {
	unsigned long insn;	/* address of insn that is allowed to fault. */
	long fixup;		/* fixup routine */
};
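/*
 * Simplified sketch of how the fault path consults this table
 * (illustrative only -- the real logic lives in the arch fault
 * handler; search_exception_tables() is the generic lookup over the
 * sorted __ex_table section).  On parisc, regs->iaoq[0] holds the
 * address of the faulting instruction:
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->iaoq[0]);
 *	if (fix)
 *		regs->iaoq[0] = fix->fixup;	resume at the fixup
 *						routine instead of
 *						killing the kernel
 */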
/*
 * The page fault handler stores, in a per-cpu area, the following
 * information if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_space;
	unsigned long fault_addr;
};
#define __get_user(x,ptr)                               \
({                                                      \
	register long __gu_err __asm__ ("r8") = 0;      \
	register long __gu_val __asm__ ("r9") = 0;      \
							\
	if (segment_eq(get_fs(),KERNEL_DS)) {           \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_kernel_asm("ldb",ptr); break; \
	    case 2: __get_kernel_asm("ldh",ptr); break; \
	    case 4: __get_kernel_asm("ldw",ptr); break; \
	    case 8: LDD_KERNEL(ptr); break;             \
	    default: __get_kernel_bad(); break;         \
	    }                                           \
	}                                               \
	else {                                          \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_user_asm("ldb",ptr); break;   \
	    case 2: __get_user_asm("ldh",ptr); break;   \
	    case 4: __get_user_asm("ldw",ptr); break;   \
	    case 8: LDD_USER(ptr); break;               \
	    default: __get_user_bad(); break;           \
	    }                                           \
	}                                               \
							\
	(x) = (__typeof__(*(ptr))) __gu_val;            \
	__gu_err;                                       \
})
#ifdef __LP64__
#define __get_kernel_asm(ldx,ptr)                       \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n"             \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.dword\t1b,fixup_get_user_skip_1\n"  \
		"\t.previous"                           \
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");

#define __get_user_asm(ldx,ptr)                         \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n"       \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.dword\t1b,fixup_get_user_skip_1\n"  \
		"\t.previous"                           \
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");
#else
#define __get_kernel_asm(ldx,ptr)                       \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n"             \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_get_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");

#define __get_user_asm(ldx,ptr)                         \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n"       \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_get_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)               \
		: "r1");
#endif /* !__LP64__ */
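/*
 * Illustrative usage sketch; the helper below is hypothetical and not
 * part of this header.  With the macros above in scope, a 4-byte user
 * fetch turns into a single ldw plus one __ex_table entry, and yields
 * 0 on success or the -EFAULT produced by fixup_get_user_skip_1.
 */
static inline int __uaccess_example_get_u32(u32 *dst, const u32 __user *src)
{
	return __get_user(*dst, src);
}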
#define __put_user(x,ptr)                                   \
({                                                          \
	register long __pu_err __asm__ ("r8") = 0;          \
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);   \
							    \
	if (segment_eq(get_fs(),KERNEL_DS)) {               \
	    switch (sizeof(*(ptr))) {                       \
	    case 1: __put_kernel_asm("stb",__x,ptr); break; \
	    case 2: __put_kernel_asm("sth",__x,ptr); break; \
	    case 4: __put_kernel_asm("stw",__x,ptr); break; \
	    case 8: STD_KERNEL(__x,ptr); break;             \
	    default: __put_kernel_bad(); break;             \
	    }                                               \
	}                                                   \
	else {                                              \
	    switch (sizeof(*(ptr))) {                       \
	    case 1: __put_user_asm("stb",__x,ptr); break;   \
	    case 2: __put_user_asm("sth",__x,ptr); break;   \
	    case 4: __put_user_asm("stw",__x,ptr); break;   \
	    case 8: STD_USER(__x,ptr); break;               \
	    default: __put_user_bad(); break;               \
	    }                                               \
	}                                                   \
							    \
	__pu_err;                                           \
})
/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing.  This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues.
 */
#ifdef __LP64__
#define __put_kernel_asm(stx,x,ptr)                     \
	__asm__ __volatile__ (                          \
		"\n1:\t" stx "\t%2,0(%1)\n"             \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.dword\t1b,fixup_put_user_skip_1\n"  \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(x), "0"(__pu_err)       \
		: "r1")

#define __put_user_asm(stx,x,ptr)                       \
	__asm__ __volatile__ (                          \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n"       \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.dword\t1b,fixup_put_user_skip_1\n"  \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(x), "0"(__pu_err)       \
		: "r1")
#else
#define __put_kernel_asm(stx,x,ptr)                     \
	__asm__ __volatile__ (                          \
		"\n1:\t" stx "\t%2,0(%1)\n"             \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_put_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(x), "0"(__pu_err)       \
		: "r1")

#define __put_user_asm(stx,x,ptr)                       \
	__asm__ __volatile__ (                          \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n"       \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_put_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(x), "0"(__pu_err)       \
		: "r1")
#define __put_kernel_asm64(__val,ptr) do {              \
	u64 __val64 = (u64)(__val);                     \
	u32 hi = (__val64) >> 32;                       \
	u32 lo = (__val64) & 0xffffffff;                \
	__asm__ __volatile__ (                          \
		"\n1:\tstw %2,0(%1)\n"                  \
		"\n2:\tstw %3,4(%1)\n"                  \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_put_user_skip_2\n"   \
		"\t.word\t2b,fixup_put_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
		: "r1");                                \
} while (0)
#define __put_user_asm64(__val,ptr) do {                \
	u64 __val64 = (u64)(__val);                     \
	u32 hi = (__val64) >> 32;                       \
	u32 lo = (__val64) & 0xffffffff;                \
	__asm__ __volatile__ (                          \
		"\n1:\tstw %2,0(%%sr3,%1)\n"            \
		"\n2:\tstw %3,4(%%sr3,%1)\n"            \
		"\t.section __ex_table,\"aw\"\n"        \
		"\t.word\t1b,fixup_put_user_skip_2\n"   \
		"\t.word\t2b,fixup_put_user_skip_1\n"   \
		"\t.previous"                           \
		: "=r"(__pu_err)                        \
		: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
		: "r1");                                \
} while (0)
#endif /* !__LP64__ */
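/*
 * Illustrative usage sketch; the helper below is hypothetical and not
 * part of this header.  On a 32-bit kernel an 8-byte store goes
 * through the paired stw sequence above (one exception-table entry
 * per store); on a 64-bit kernel it is a single std.
 */
static inline int __uaccess_example_put_u64(u64 __user *dst, u64 val)
{
	return __put_user(val, dst);
}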
/*
 * Complex access routines -- external declarations
 */
extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long lstrncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define strncpy_from_user lstrncpy_from_user
#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len);
#define __copy_from_user copy_from_user
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
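/*
 * Illustrative usage sketch; the helper below is hypothetical and not
 * part of this header.  As in the generic kernel API, copy_from_user()
 * returns the number of bytes it could NOT copy, so callers
 * conventionally map any nonzero return to -EFAULT.
 */
static inline int __uaccess_example_fetch(void *dst, const void __user *src,
					  unsigned long len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}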
#endif /* __PARISC_UACCESS_H */