uaccess.h 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327
  1. /*
  2. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  3. * Copyright (C) 2008-2009 PetaLogix
  4. * Copyright (C) 2006 Atmark Techno, Inc.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file "COPYING" in the main directory of this archive
  8. * for more details.
  9. */
  10. #ifndef _ASM_MICROBLAZE_UACCESS_H
  11. #define _ASM_MICROBLAZE_UACCESS_H
  12. #ifdef __KERNEL__
  13. #ifndef __ASSEMBLY__
  14. #include <linux/kernel.h>
  15. #include <linux/errno.h>
  16. #include <linux/sched.h> /* RLIMIT_FSIZE */
  17. #include <linux/mm.h>
  18. #include <asm/mmu.h>
  19. #include <asm/page.h>
  20. #include <asm/pgtable.h>
  21. #include <asm/segment.h>
  22. #include <linux/string.h>
/* Legacy access_ok() "type" arguments: direction of the access to check. */
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * Zero 'n' bytes at 'addr' with no access checking or fault handling:
 * a plain memset().  The trailing ", 0" makes the macro evaluate to 0,
 * i.e. "no bytes left uncleared".
 */
#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
  26. #ifndef CONFIG_MMU
  27. extern int ___range_ok(unsigned long addr, unsigned long size);
  28. #define __range_ok(addr, size) \
  29. ___range_ok((unsigned long)(addr), (unsigned long)(size))
  30. #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
  31. #define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
  32. /* Undefined function to trigger linker error */
  33. extern int bad_user_access_length(void);
/* FIXME: this macro is a candidate for optimization -> memcpy */
/*
 * noMMU __get_user(): read *ptr into 'var' with no fault handling
 * (a plain dereference — there is no protection boundary to fault on).
 * Evaluates to 0 on success, or -EFAULT via __get_user_bad() for
 * unsupported sizes.
 */
#define __get_user(var, ptr) \
({ \
	int __gu_err = 0; \
	switch (sizeof(*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
		(var) = *(ptr); \
		break; \
	case 8: \
		/* 64-bit reads go through memcpy */ \
		memcpy((void *) &(var), (ptr), 8); \
		break; \
	default: \
		(var) = 0; \
		__gu_err = __get_user_bad(); \
		break; \
	} \
	__gu_err; \
})

/*
 * bad_user_access_length() is never defined, so an unsupported access
 * size is caught at link time rather than silently at run time.
 */
#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
/*
 * noMMU __put_user(): store 'var' to *ptr with no fault handling.
 * Evaluates to 0 on success, or -EFAULT via __put_user_bad() for
 * unsupported sizes.  (The old "FIXME is not there defined __pu_val"
 * note referred to the 8-byte case, which defines its own __pu_val.)
 */
#define __put_user(var, ptr) \
({ \
	int __pu_err = 0; \
	switch (sizeof(*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
		*(ptr) = (var); \
		break; \
	case 8: { \
		/* stage the 64-bit value in a local, then copy it out */ \
		typeof(*(ptr)) __pu_val = (var); \
		memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
	} \
		break; \
	default: \
		__pu_err = __put_user_bad(); \
		break; \
	} \
	__pu_err; \
})

/* Undefined reference -> link-time error for bad access sizes. */
#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
/* With no MMU the checked and unchecked variants are identical. */
#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))

/*
 * noMMU copies are a straight memcpy(); the trailing ", 0" makes each
 * macro evaluate to 0 ("no bytes left uncopied"), so these can never
 * report failure.
 */
#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
	(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
	(__copy_from_user((to), (from), (n)))
  87. static inline unsigned long clear_user(void *addr, unsigned long size)
  88. {
  89. if (access_ok(VERIFY_WRITE, addr, size))
  90. size = __clear_user(addr, size);
  91. return size;
  92. }
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

/* String helpers; implemented out of line elsewhere in the arch code. */
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
#else /* CONFIG_MMU */

/*
 * Address is valid if:
 *  - "addr", "addr + size" and "size" are all below the limit
 * (the OR-and-compare folds all three checks into one comparison
 * against the current segment limit, get_fs().seg).
 *
 * NOTE(review): 'addr' and 'size' are each expanded more than once, so
 * arguments with side effects will misbehave.
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))
/* Debug aid, normally disabled:
 * || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 * type?"WRITE":"READ",addr,size,get_fs().seg)) */

/*
 * All the __XXX versions macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have been
 * already performed before the function (macro) is called.
 */
/*
 * Checked single-value transfers: range-check 'ptr', then delegate to
 * the unchecked __get_user()/__put_user().  Evaluate to 0 on success
 * or -EFAULT when the pointer fails access_ok().
 * NOTE: 'ptr' is expanded more than once.
 */
#define get_user(x, ptr) \
({ \
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
		? __get_user((x), (ptr)) : -EFAULT; \
})

#define put_user(x, ptr) \
({ \
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
		? __put_user((x), (ptr)) : -EFAULT; \
})
/*
 * Unchecked fetch from user space: select the load instruction by the
 * pointee size; __get_user_asm() supplies the exception-table fixup.
 * Evaluates to 0 on success, -EFAULT on a faulting load.
 * NOTE(review): unlike __put_user() there is no 8-byte case here —
 * a 64-bit read yields -EINVAL; confirm no caller needs it.
 */
#define __get_user(x, ptr) \
({ \
	unsigned long __gu_val; \
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
	long __gu_err; \
	switch (sizeof(*(ptr))) { \
	case 1: \
		/* unsigned byte load */ \
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
		break; \
	case 2: \
		/* unsigned half-word load */ \
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
		break; \
	case 4: \
		/* word load */ \
		__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
		break; \
	default: \
		__gu_val = 0; __gu_err = -EINVAL; \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})
/*
 * __get_user_asm(): one faultable load from user space.
 *  1: the load that may fault; on the success path err (%0) is then
 *     cleared by "addk %0, r0, r0".
 * On a fault, the __ex_table pair (1b -> 3b) sends the exception
 * handler to the .fixup stub, which branches back to 2: with
 * err = -EFAULT ("addik" executes in the delay slot of "brid").
 */
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;				\
			addk %0, r0, r0;			\
		2:						\
		.section .fixup,\"ax\";				\
		3:	brid 2b;				\
			addik %0, r0, %3;			\
		.previous;					\
		.section __ex_table,\"a\";			\
		.word 1b,3b;					\
		.previous;"					\
		: "=r"(__gu_err), "=r"(__gu_val)		\
		: "r"(__gu_ptr), "i"(-EFAULT)			\
	);							\
})
/*
 * Unchecked store to user space.  'x' is snapshotted into a local
 * first (so it is evaluated exactly once), then the store instruction
 * is selected by size.  Evaluates to 0 on success, -EFAULT on a
 * faulting store, -EINVAL for unsupported sizes.
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) volatile __gu_val = (x); \
	long __gu_err = 0; \
	switch (sizeof(__gu_val)) { \
	case 1: \
		__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
		break; \
	case 2: \
		__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
		break; \
	case 4: \
		__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
		break; \
	case 8: \
		/* 64-bit values are stored as two words */ \
		__put_user_asm_8((ptr), __gu_val, __gu_err); \
		break; \
	default: \
		__gu_err = -EINVAL; \
	} \
	__gu_err; \
})
/*
 * 64-bit store: copy the value word by word through a scratch register
 * (%0, which doubles as the error code — hence the "=&r" early-clobber).
 * Both user-space stores (labels 1: and 2:) have __ex_table entries
 * pointing at the shared fixup 4:, which sets err = -EFAULT and resumes
 * at 3: ("addik" runs in the "brid" delay slot).
 */
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({ \
	__asm__ __volatile__ ("	lwi %0, %1, 0;		\
		1:	swi %0, %2, 0;			\
			lwi %0, %1, 4;			\
		2:	swi %0, %2, 4;			\
			addk %0,r0,r0;			\
		3:					\
		.section .fixup,\"ax\";			\
		4:	brid 3b;			\
			addik %0, r0, %3;		\
		.previous;				\
		.section __ex_table,\"a\";		\
		.word 1b,4b,2b,4b;			\
		.previous;"				\
		: "=&r"(__gu_err)			\
		: "r"(&__gu_val),			\
		  "r"(__gu_ptr), "i"(-EFAULT)		\
	); \
})
/*
 * __put_user_asm(): one faultable store to user space — same fixup
 * scheme as __get_user_asm() (1: may fault; __ex_table redirects to
 * the .fixup stub, which resumes at 2: with err = -EFAULT).
 */
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;				\
			addk %0, r0, r0;			\
		2:						\
		.section .fixup,\"ax\";				\
		3:	brid 2b;				\
			addik %0, r0, %3;			\
		.previous;					\
		.section __ex_table,\"a\";			\
		.word 1b,3b;					\
		.previous;"					\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})
/*
 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
 */
/*
 * MMU clear_user(): range-check, then zero the buffer one byte at a
 * time with a faultable store (label 1:).  'size' is decremented each
 * iteration; on a fault the __ex_table entry (1b -> 2b) exits the
 * loop, so the returned 'size' is the number of bytes left uncleared.
 * NOTE(review): the asm switches back with ".section .text" rather
 * than ".previous" as the other helpers do — confirm this assembles
 * into the intended section.
 */
static inline int clear_user(char *to, int size)
{
	if (size && access_ok(VERIFY_WRITE, to, size)) {
		__asm__ __volatile__ ("			\
		1:					\
			sb r0, %2, r0;			\
			addik %0, %0, -1;		\
			bneid %0, 1b;			\
			addik %2, %2, 1;		\
		2:					\
		.section __ex_table,\"a\";		\
		.word 1b,2b;				\
		.section .text;"			\
			: "=r"(size)			\
			: "0"(size), "r"(to)
		);
	}
	return size;
}
/* Out-of-line bulk copy; returns the number of bytes NOT copied. */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

/*
 * Checked copies: range-check, then delegate to __copy_tofrom_user().
 * On success these yield the bytes left uncopied (0 when everything was
 * copied); on a failed range check they yield -EFAULT.
 * NOTE(review): mixing negative -EFAULT into an otherwise unsigned
 * "bytes remaining" result is unusual — callers comparing against 0
 * still work, but verify any caller that inspects the exact value.
 */
#define copy_to_user(to, from, n) \
	(access_ok(VERIFY_WRITE, (to), (n)) ? \
		__copy_tofrom_user((void __user *)(to), \
			(__force const void __user *)(from), (n)) \
		: -EFAULT)

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n) \
	(access_ok(VERIFY_READ, (from), (n)) ? \
		__copy_tofrom_user((__force void __user *)(to), \
			(void __user *)(from), (n)) \
		: -EFAULT)

#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	copy_from_user((to), (from), (n))
/* Out-of-line string helpers (fault-handling versions). */
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

/*
 * Only the first byte is range-checked here; presumably the out-of-line
 * helpers rely on faults for the rest of the string — verify against
 * their implementations.  strnlen_user() returns 0 (not -EFAULT) on a
 * bad pointer, matching the generic kernel convention for this helper.
 */
#define strncpy_from_user(to, from, len) \
	(access_ok(VERIFY_READ, from, 1) ? \
		__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
	(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
  265. #endif /* CONFIG_MMU */
  266. /*
  267. * The exception table consists of pairs of addresses: the first is the
  268. * address of an instruction that is allowed to fault, and the second is
  269. * the address at which the program should continue. No registers are
  270. * modified, so it is entirely up to the continuation code to figure out
  271. * what to do.
  272. *
  273. * All the routines below use bits of fixup code that are out of line
  274. * with the main instruction path. This means when everything is well,
  275. * we don't even have to jump over them. Further, they do not intrude
  276. * on our cache or tlb entries.
  277. */
  278. struct exception_table_entry {
  279. unsigned long insn, fixup;
  280. };
  281. #endif /* __ASSEMBLY__ */
  282. #endif /* __KERNEL__ */
  283. #endif /* _ASM_MICROBLAZE_UACCESS_H */