/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/string.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * On a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
 */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

# ifndef CONFIG_MMU
# define KERNEL_DS	MAKE_MM_SEG(0)
# define USER_DS	KERNEL_DS
# else
# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
# endif

# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

# define segment_eq(a, b)	((a).seg == (b).seg)
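
/*
 * Illustrative sketch (not part of this header): kernel code that needs the
 * user-access checks bypassed temporarily widens the limit with set_fs() and
 * restores it afterwards. The surrounding code is hypothetical; only the
 * get_fs()/set_fs() pairing defined above is demonstrated.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... perform user-style accesses on kernel addresses ...
 *	set_fs(old_fs);
 */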

#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)

#ifndef CONFIG_MMU

extern int ___range_ok(unsigned long addr, unsigned long size);

#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)

/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);

/* FIXME: candidate for optimization -> memcpy */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))

/* FIXME: __pu_val is only defined inside the 8-byte case */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))

#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))

static inline unsigned long clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		size = __clear_user(addr, size);
	return size;
}

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
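
/*
 * Illustrative sketch (not part of this header): fetching a NUL-terminated
 * string from user space with the helper declared above. The buffer name,
 * size and user pointer are hypothetical; only the return-value handling
 * pattern matters.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, user_ptr, sizeof(name));
 *
 *	if (len < 0)
 *		return -EFAULT;	// source string was not accessible
 */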

#else /* CONFIG_MMU */

/*
 * Address is valid if:
 *  - "addr", "addr + size" and "size" are all below the limit
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))

/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 type?"WRITE":"READ",addr,size,get_fs().seg)) */

/*
 * All the __XXX versions of the macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have already
 * been performed before the function (macro) is called.
 */

#define get_user(x, ptr)					\
({								\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
		? __get_user((x), (ptr)) : -EFAULT;		\
})

#define put_user(x, ptr)					\
({								\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))		\
		? __put_user((x), (ptr)) : -EFAULT;		\
})
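
/*
 * Illustrative sketch (not part of this header): typical get_user()/put_user()
 * use in driver or syscall code. The function and argument names are
 * hypothetical; only the error-handling pattern is meant to be shown.
 *
 *	int example_bump_flag(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 */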

#define __get_user(x, ptr)						\
({									\
	unsigned long __gu_val;						\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
	long __gu_err;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
		break;							\
	default:							\
		__gu_val = 0; __gu_err = -EINVAL;			\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;				\
			addk %0, r0, r0;			\
		2:						\
		.section .fixup,\"ax\";				\
		3:	brid 2b;				\
			addik %0, r0, %3;			\
		.previous;					\
		.section __ex_table,\"a\";			\
		.word 1b,3b;					\
		.previous;"					\
		: "=r"(__gu_err), "=r"(__gu_val)		\
		: "r"(__gu_ptr), "i"(-EFAULT)			\
	);							\
})

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) volatile __gu_val = (x);			\
	long __gu_err = 0;						\
	switch (sizeof(__gu_val)) {					\
	case 1:								\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 8:								\
		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
		break;							\
	default:							\
		__gu_err = -EINVAL;					\
	}								\
	__gu_err;							\
})

#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({							\
	__asm__ __volatile__ ("	lwi %0, %1, 0;		\
		1:	swi %0, %2, 0;			\
			lwi %0, %1, 4;			\
		2:	swi %0, %2, 4;			\
			addk %0, r0, r0;		\
		3:					\
		.section .fixup,\"ax\";			\
		4:	brid 3b;			\
			addik %0, r0, %3;		\
		.previous;				\
		.section __ex_table,\"a\";		\
		.word 1b,4b,2b,4b;			\
		.previous;"				\
		: "=&r"(__gu_err)			\
		: "r"(&__gu_val),			\
		  "r"(__gu_ptr), "i"(-EFAULT)		\
	);						\
})

#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;				\
			addk %0, r0, r0;			\
		2:						\
		.section .fixup,\"ax\";				\
		3:	brid 2b;				\
			addik %0, r0, %3;			\
		.previous;					\
		.section __ex_table,\"a\";			\
		.word 1b,3b;					\
		.previous;"					\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})

/*
 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
 */
static inline int clear_user(char *to, int size)
{
	if (size && access_ok(VERIFY_WRITE, to, size)) {
		__asm__ __volatile__ ("				\
			1:					\
				sb r0, %2, r0;			\
				addik %0, %0, -1;		\
				bneid %0, 1b;			\
				addik %2, %2, 1;		\
			2:					\
			.section __ex_table,\"a\";		\
			.word 1b,2b;				\
			.section .text;"
			: "=r"(size)
			: "0"(size), "r"(to)
		);
	}
	return size;
}
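
/*
 * Illustrative sketch (not part of this header): clear_user() returns the
 * number of bytes that could NOT be zeroed, so a non-zero result means the
 * user buffer was partially or wholly inaccessible. The pointer and length
 * names are hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */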

#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

#define copy_to_user(to, from, n)					\
	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
		__copy_tofrom_user((void __user *)(to),			\
			(__force const void __user *)(from), (n))	\
		: -EFAULT)

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n)					\
	(access_ok(VERIFY_READ, (from), (n)) ?				\
		__copy_tofrom_user((__force void __user *)(to),		\
			(void __user *)(from), (n))			\
		: -EFAULT)
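
/*
 * Illustrative sketch (not part of this header): in this implementation
 * copy_from_user()/copy_to_user() return the number of bytes left uncopied,
 * or -EFAULT when access_ok() fails, so callers treat any non-zero return
 * as failure. The structure and pointer names are hypothetical.
 *
 *	struct example_req req;
 *
 *	if (copy_from_user(&req, user_arg, sizeof(req)))
 *		return -EFAULT;
 *	// ... operate on req ...
 *	if (copy_to_user(user_arg, &req, sizeof(req)))
 *		return -EFAULT;
 */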

extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len)	\
		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)

#endif /* CONFIG_MMU */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
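
/*
 * Illustrative sketch (not part of this header): conceptually, the fault
 * handler consults this table when a kernel-mode access faults. If the
 * faulting PC matches an "insn" entry, execution resumes at the paired
 * "fixup" address (which typically loads -EFAULT into the error register,
 * as the .fixup snippets above do). The linear scan below is a simplified
 * sketch of that lookup, assuming the usual __start___ex_table /
 * __stop___ex_table section bounds; the real lookup lives in the exception
 * handling code.
 *
 *	const struct exception_table_entry *e;
 *
 *	for (e = __start___ex_table; e < __stop___ex_table; e++)
 *		if (e->insn == faulting_pc)
 *			return e->fixup;
 *	return 0;	// no entry found
 */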

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_UACCESS_H */