
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type,addr,size) \
({ \
	__chk_user_ptr(addr); \
	__access_ok(((unsigned long)(addr)),(size),get_fs()); \
})
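
/*
 * Worked example (illustrative only): USER_DS makes .seg the mask
 * 0xfffffc0000000000, so the AND above becomes non-zero as soon as the
 * address, the size, or the end address has a bit at or above bit 42
 * set.  KERNEL_DS makes the mask zero, so every access passes.
 *
 *	__access_ok(0x0000000120000000UL, 16, USER_DS)    -> 1 (user address)
 *	__access_ok(0xfffffc0000310000UL, 16, USER_DS)    -> 0 (kernel address)
 *	__access_ok(0xfffffc0000310000UL, 16, KERNEL_DS)  -> 1 (mask is 0)
 */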
/* this function will go away soon - use access_ok() instead */
extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
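
/*
 * Usage sketch (hypothetical caller, names made up): get_user() and
 * put_user() validate the address themselves and return 0 or -EFAULT,
 * while the __ variants assume the caller has already done access_ok():
 *
 *	long example_swap_with_user(int __user *uptr, int newval, int *oldval)
 *	{
 *		if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(*oldval, uptr))
 *			return -EFAULT;
 *		return __put_user(newval, uptr) ? -EFAULT : 0;
 *	}
 */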
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err = 0; \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_8(ptr); break; \
	case 2: __get_user_16(ptr); break; \
	case 4: __get_user_32(ptr); break; \
	case 8: __get_user_64(ptr); break; \
	default: __get_user_unknown(); break; \
	} \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size,segment) \
({ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
		__gu_err = 0; \
		switch (size) { \
		case 1: __get_user_8(__gu_addr); break; \
		case 2: __get_user_16(__gu_addr); break; \
		case 4: __get_user_32(__gu_addr); break; \
		case 8: __get_user_64(__gu_addr); break; \
		default: __get_user_unknown(); break; \
		} \
	} \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr) \
	__asm__("1: ldq %0,%2\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 2b-1b(%1)\n" \
		".previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr) \
	__asm__("1: ldl %0,%2\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 2b-1b(%1)\n" \
		".previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __get_user_16(addr) \
	__asm__("1: ldwu %0,%2\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 2b-1b(%1)\n" \
		".previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr) \
	__asm__("1: ldbu %0,%2\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 2b-1b(%1)\n" \
		".previous" \
		: "=r"(__gu_val), "=r"(__gu_err) \
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation. */

#define __get_user_16(addr) \
{ \
	long __gu_tmp; \
	__asm__("1: ldq_u %0,0(%3)\n" \
		"2: ldq_u %1,1(%3)\n" \
		" extwl %0,%3,%0\n" \
		" extwh %1,%3,%1\n" \
		" or %0,%1,%0\n" \
		"3:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 3b-1b(%2)\n" \
		" .long 2b - .\n" \
		" lda %0, 3b-2b(%2)\n" \
		".previous" \
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
		: "r"(addr), "2"(__gu_err)); \
}

#define __get_user_8(addr) \
	__asm__("1: ldq_u %0,0(%2)\n" \
		" extbl %0,%2,%0\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda %0, 2b-1b(%1)\n" \
		".previous" \
		: "=&r"(__gu_val), "=r"(__gu_err) \
		: "r"(addr), "1"(__gu_err))
#endif
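
/*
 * Roughly what the non-BWX 16-bit sequence above computes, as a C sketch
 * (illustrative only; the real code uses ldq_u/extwl/extwh so a word that
 * straddles a quadword boundary is assembled from two aligned loads and
 * never faults on misalignment):
 *
 *	unsigned long a   = (unsigned long)(addr);
 *	unsigned long q0  = *(unsigned long *)(a & ~7UL);
 *	unsigned long q1  = *(unsigned long *)((a + 1) & ~7UL);
 *	unsigned long ofs = a & 7UL;
 *	unsigned long lo  = (q0 >> (ofs * 8)) & 0xffff;
 *	unsigned long hi  = (ofs == 7) ? ((q1 << 8) & 0xffff) : 0;
 *	__gu_val = lo | hi;
 */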
extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __put_user_8(x,ptr); break; \
	case 2: __put_user_16(x,ptr); break; \
	case 4: __put_user_32(x,ptr); break; \
	case 8: __put_user_64(x,ptr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})

#define __put_user_check(x,ptr,size,segment) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
		__pu_err = 0; \
		switch (size) { \
		case 1: __put_user_8(x,__pu_addr); break; \
		case 2: __put_user_16(x,__pu_addr); break; \
		case 4: __put_user_32(x,__pu_addr); break; \
		case 8: __put_user_64(x,__pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x,addr) \
	__asm__ __volatile__("1: stq %r2,%1\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31,2b-1b(%0)\n" \
		".previous" \
		: "=r"(__pu_err) \
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr) \
	__asm__ __volatile__("1: stl %r2,%1\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31,2b-1b(%0)\n" \
		".previous" \
		: "=r"(__pu_err) \
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

#define __put_user_16(x,addr) \
	__asm__ __volatile__("1: stw %r2,%1\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31,2b-1b(%0)\n" \
		".previous" \
		: "=r"(__pu_err) \
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr) \
	__asm__ __volatile__("1: stb %r2,%1\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31,2b-1b(%0)\n" \
		".previous" \
		: "=r"(__pu_err) \
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation. */

#define __put_user_16(x,addr) \
{ \
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
	__asm__ __volatile__( \
		"1: ldq_u %2,1(%5)\n" \
		"2: ldq_u %1,0(%5)\n" \
		" inswh %6,%5,%4\n" \
		" inswl %6,%5,%3\n" \
		" mskwh %2,%5,%2\n" \
		" mskwl %1,%5,%1\n" \
		" or %2,%4,%2\n" \
		" or %1,%3,%1\n" \
		"3: stq_u %2,1(%5)\n" \
		"4: stq_u %1,0(%5)\n" \
		"5:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31, 5b-1b(%0)\n" \
		" .long 2b - .\n" \
		" lda $31, 5b-2b(%0)\n" \
		" .long 3b - .\n" \
		" lda $31, 5b-3b(%0)\n" \
		" .long 4b - .\n" \
		" lda $31, 5b-4b(%0)\n" \
		".previous" \
		: "=r"(__pu_err), "=&r"(__pu_tmp1), \
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
		  "=&r"(__pu_tmp4) \
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x,addr) \
{ \
	long __pu_tmp1, __pu_tmp2; \
	__asm__ __volatile__( \
		"1: ldq_u %1,0(%4)\n" \
		" insbl %3,%4,%2\n" \
		" mskbl %1,%4,%1\n" \
		" or %1,%2,%1\n" \
		"2: stq_u %1,0(%4)\n" \
		"3:\n" \
		".section __ex_table,\"a\"\n" \
		" .long 1b - .\n" \
		" lda $31, 3b-1b(%0)\n" \
		" .long 2b - .\n" \
		" lda $31, 3b-2b(%0)\n" \
		".previous" \
		: "=r"(__pu_err), \
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
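
/*
 * Likewise, the non-BWX byte store above is a read-modify-write of the
 * aligned quadword containing the target byte.  As a C sketch
 * (illustrative only, exception handling omitted):
 *
 *	unsigned long a   = (unsigned long)(addr);
 *	unsigned long *qp = (unsigned long *)(a & ~7UL);
 *	unsigned long ofs = a & 7UL;
 *	unsigned long q   = *qp;
 *	q &= ~(0xffUL << (ofs * 8));
 *	q |= ((unsigned long)(x) & 0xffUL) << (ofs * 8);
 *	*qp = q;
 */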
/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker. */
#ifdef MODULE
#define __module_address(sym) "r"(sym),
#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
#endif
extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}

extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
	if (__access_ok((unsigned long)validate, len, get_fs()))
		len = __copy_tofrom_user_nocheck(to, from, len);
	return len;
}

#define __copy_to_user(to,from,n) \
({ \
	__chk_user_ptr(to); \
	__copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
})

#define __copy_from_user(to,from,n) \
({ \
	__chk_user_ptr(from); \
	__copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	return __copy_tofrom_user((__force void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	return __copy_tofrom_user(to, (__force void *)from, n, from);
}
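
/*
 * Usage sketch (hypothetical caller, names made up): both routines return
 * the number of bytes that could not be copied, so zero means success:
 *
 *	struct example_args kargs;
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uresult, &kargs, sizeof(kargs)))
 *		return -EFAULT;
 */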
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}

#undef __module_address
#undef __module_call

/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen. */
extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);

extern inline long
strncpy_from_user(char *to, const char __user *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((unsigned long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}
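
/*
 * Usage sketch (hypothetical caller, names made up): per the comment
 * above, a negative return means a fault, a return equal to the buffer
 * size means the terminator was not reached, anything smaller is the
 * copied string length:
 *
 *	char kname[32];
 *	long len = strncpy_from_user(kname, uname, sizeof(kname));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(kname))
 *		return -ENAMETOOLONG;
 */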
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char __user *);

extern inline long strlen_user(const char __user *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}

/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen. */
extern long __strnlen_user(const char __user *, long);

extern inline long strnlen_user(const char __user *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *	lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, fixup, pc) \
({ \
	if ((fixup)->fixup.bits.valreg != 31) \
		map_reg((fixup)->fixup.bits.valreg) = 0; \
	if ((fixup)->fixup.bits.errreg != 31) \
		map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \
	(pc) + (fixup)->fixup.bits.nextinsn; \
})
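
/*
 * Worked example (illustrative): for __get_user_32 above, the fixup is
 * "lda %0, 2b-1b(%1)" with %0 holding __gu_val and %1 holding __gu_err.
 * If the ldl at 1b happens to use $1 for the value and $2 for the error
 * register, the table entry decodes as
 *
 *	insn                = 1b - .	(pc-relative offset of the load)
 *	fixup.bits.nextinsn = 4		(2b is one instruction past 1b)
 *	fixup.bits.errreg   = 2		($2 receives -EFAULT)
 *	fixup.bits.valreg   = 1		($1 is zeroed)
 *
 * and fixup_exception() resumes execution at the faulting pc + 4.
 */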
#endif /* __ALPHA_UACCESS_H */