#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}
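
/*
 * Illustrative sketch, not part of the original header: invalid user
 * pointers simply fault in the MMU and are recovered through the
 * exception tables below, so access_ok() can always return 1.  A
 * hypothetical caller still follows the usual pattern:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */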

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
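
/*
 * Illustrative sketch, an assumption rather than part of this header:
 * on a fault, the kernel fault handler looks the faulting PC up in the
 * __ex_table section and, if an entry matches, resumes execution at the
 * fixup address instead of killing the task.  Roughly:
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->pc);
 *	if (fix) {
 *		regs->pc = fix->fixup;	// jump to the out-of-line fixup,
 *		return;			// which typically sets -EFAULT
 *	}
 *	// no entry: a genuine fault, e.g. deliver SIGSEGV
 */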

extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
	"1: moves."#bwl" %2,%1\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .even\n" \
	"10: moveq.l %3,%0\n" \
	" jra 2b\n" \
	" .previous\n" \
	"\n" \
	" .section __ex_table,\"a\"\n" \
	" .align 4\n" \
	" .long 1b,10b\n" \
	" .long 2b,10b\n" \
	" .previous" \
	: "+d" (res), "=m" (*(ptr)) \
	: #reg (x), "i" (err))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr) \
({ \
	typeof(*(ptr)) __pu_val = (x); \
	int __pu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
		break; \
	case 2: \
		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
		break; \
	case 4: \
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
		break; \
	case 8: \
	    { \
		const void __user *__pu_ptr = (ptr); \
		asm volatile ("\n" \
			"1: moves.l %2,(%1)+\n" \
			"2: moves.l %R2,(%1)\n" \
			"3:\n" \
			" .section .fixup,\"ax\"\n" \
			" .even\n" \
			"10: movel %3,%0\n" \
			" jra 3b\n" \
			" .previous\n" \
			"\n" \
			" .section __ex_table,\"a\"\n" \
			" .align 4\n" \
			" .long 1b,10b\n" \
			" .long 2b,10b\n" \
			" .long 3b,10b\n" \
			" .previous" \
			: "+d" (__pu_err), "+a" (__pu_ptr) \
			: "r" (__pu_val), "i" (-EFAULT) \
			: "memory"); \
		break; \
	    } \
	default: \
		__pu_err = __put_user_bad(); \
		break; \
	} \
	__pu_err; \
})

#define put_user(x, ptr)	__put_user(x, ptr)
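
/*
 * Illustrative sketch, not part of the original header: put_user()
 * selects the right moves.{b,w,l} variant from the pointer type, so a
 * hypothetical caller only writes:
 *
 *	u32 __user *uptr = ...;
 *
 *	if (put_user(val, uptr))	// expands to the 4-byte case
 *		return -EFAULT;
 */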

#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
	type __gu_val; \
	asm volatile ("\n" \
		"1: moves."#bwl" %2,%1\n" \
		"2:\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"10: move.l %3,%0\n" \
		" sub."#bwl" %1,%1\n" \
		" jra 2b\n" \
		" .previous\n" \
		"\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 1b,10b\n" \
		" .previous" \
		: "+d" (res), "=&" #reg (__gu_val) \
		: "m" (*(ptr)), "i" (err)); \
	(x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
})

#define __get_user(x, ptr) \
({ \
	int __gu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
		break; \
	case 2: \
		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
		break; \
	case 4: \
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
		break; \
/*	case 8: disabled because gcc-4.1 has a broken typeof \
	    { \
		const void *__gu_ptr = (ptr); \
		u64 __gu_val; \
		asm volatile ("\n" \
			"1: moves.l (%2)+,%1\n" \
			"2: moves.l (%2),%R1\n" \
			"3:\n" \
			" .section .fixup,\"ax\"\n" \
			" .even\n" \
			"10: move.l %3,%0\n" \
			" sub.l %1,%1\n" \
			" sub.l %R1,%R1\n" \
			" jra 3b\n" \
			" .previous\n" \
			"\n" \
			" .section __ex_table,\"a\"\n" \
			" .align 4\n" \
			" .long 1b,10b\n" \
			" .long 2b,10b\n" \
			" .previous" \
			: "+d" (__gu_err), "=&r" (__gu_val), \
			  "+a" (__gu_ptr) \
			: "i" (-EFAULT) \
			: "memory"); \
		(x) = (typeof(*(ptr)))__gu_val; \
		break; \
	    } */ \
	default: \
		__gu_err = __get_user_bad(); \
		break; \
	} \
	__gu_err; \
})

#define get_user(x, ptr) __get_user(x, ptr)
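
/*
 * Illustrative sketch, not part of the original header: on a fault the
 * fixup code above both sets the error and clears the destination
 * register (the sub."#bwl" %1,%1), so a hypothetical caller sees a
 * zeroed value together with -EFAULT:
 *
 *	u16 val;
 *
 *	if (get_user(val, (u16 __user *)uaddr))
 *		return -EFAULT;		// val reads back as 0 here
 */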

unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
	asm volatile ("\n" \
		"1: moves."#s1" (%2)+,%3\n" \
		" move."#s1" %3,(%1)+\n" \
		"2: moves."#s2" (%2)+,%3\n" \
		" move."#s2" %3,(%1)+\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		"3: moves."#s3" (%2)+,%3\n" \
		" move."#s3" %3,(%1)+\n" \
		" .endif\n" \
		"4:\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 1b,10f\n" \
		" .long 2b,20f\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" .long 3b,30f\n" \
		" .endif\n" \
		" .previous\n" \
		"\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"10: clr."#s1" (%1)+\n" \
		"20: clr."#s2" (%1)+\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		"30: clr."#s3" (%1)+\n" \
		" .endif\n" \
		" moveq.l #"#n",%0\n" \
		" jra 4b\n" \
		" .previous\n" \
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
		: : "memory")
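
/*
 * Illustrative note, not part of the original header: s1/s2/s3
 * decompose a constant size into at most three moves.  For example,
 * n == 7 uses (l, w, b), i.e. 4 + 2 + 1 bytes.  On a fault the fixup
 * clears the destination from the faulting chunk onward and returns
 * the constant n, i.e. the whole copy is conservatively reported as
 * uncopied.
 */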

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
		break;
	case 2:
		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
	asm volatile ("\n" \
		" move."#s1" (%2)+,%3\n" \
		"11: moves."#s1" %3,(%1)+\n" \
		"12: move."#s2" (%2)+,%3\n" \
		"21: moves."#s2" %3,(%1)+\n" \
		"22:\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" move."#s3" (%2)+,%3\n" \
		"31: moves."#s3" %3,(%1)+\n" \
		"32:\n" \
		" .endif\n" \
		"4:\n" \
		"\n" \
		" .section __ex_table,\"a\"\n" \
		" .align 4\n" \
		" .long 11b,5f\n" \
		" .long 12b,5f\n" \
		" .long 21b,5f\n" \
		" .long 22b,5f\n" \
		" .ifnc \""#s3"\",\"\"\n" \
		" .long 31b,5f\n" \
		" .long 32b,5f\n" \
		" .endif\n" \
		" .previous\n" \
		"\n" \
		" .section .fixup,\"ax\"\n" \
		" .even\n" \
		"5: moveq.l #"#n",%0\n" \
		" jra 4b\n" \
		" .previous\n" \
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
		: : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}

#define __copy_from_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_from_user(to, from, n) :	\
 __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_to_user(to, from, n) :		\
 __generic_copy_to_user(to, from, n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
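
/*
 * Illustrative sketch, not part of the original header: when the size
 * is a small compile-time constant the copy inlines to at most three
 * moves; otherwise it falls back to the out-of-line generic routine.
 * A hypothetical caller (struct req is made up):
 *
 *	struct req r;
 *
 *	if (copy_from_user(&r, ubuf, sizeof(r)))
 *		return -EFAULT;	// nonzero return = bytes not copied
 */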

long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user

#define strlen_user(str) strnlen_user(str, 32767)
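
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller copying a user string, assuming the usual strncpy_from_user()
 * semantics (string length on success, count if unterminated within
 * count bytes, negative on fault):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */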

#endif /* __M68K_UACCESS_H */