/* arch/arm64/include/asm/uaccess.h — user space memory access helpers */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  18. #ifndef __ASM_UACCESS_H
  19. #define __ASM_UACCESS_H
  20. /*
  21. * User space memory access functions
  22. */
  23. #include <linux/string.h>
  24. #include <linux/thread_info.h>
  25. #include <asm/ptrace.h>
  26. #include <asm/errno.h>
  27. #include <asm/memory.h>
  28. #include <asm/compiler.h>
  29. #define VERIFY_READ 0
  30. #define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
/*
 * One entry per faulting-capable instruction: 'insn' is the address of
 * the instruction that is allowed to fault, 'fixup' the address at
 * which execution resumes after the fault (see comment above).
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Search the exception table for regs->pc; returns nonzero if fixed up. */
extern int fixup_exception(struct pt_regs *regs);
/* Kernel segment: an all-ones limit, so no address ever fails the check. */
#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

/* User segment: accesses are confined below the top of user VA space. */
#define USER_DS		TASK_SIZE_64

#define get_fs()	(current_thread_info()->addr_limit)

/*
 * Change the current task's address-space limit, e.g. set_fs(KERNEL_DS)
 * around a uaccess call that must operate on kernel addresses.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define segment_eq(a,b)	((a) == (b))
/*
 * Return 1 if addr < current->addr_limit, 0 otherwise.
 */
#define __addr_ok(addr)							\
({									\
	unsigned long flag;						\
	/* flag starts as addr_limit ("0" ties it to %0); after the	\
	 * cmp, "cset lo" makes flag 1 iff addr is unsigned-lower	\
	 * than the limit. */						\
	asm("cmp %1, %0; cset %0, lo"					\
		: "=&r" (flag)						\
		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
		: "cc");						\
	flag;								\
})
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size < (u65)current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	/* adds: roksum = addr + size, carry set on 64-bit overflow.	\
	 * ccmp: if no carry ("cc"), compare roksum with the limit;	\
	 *       otherwise force NZCV to #2 (C set = range invalid).	\
	 * cset cc: flag = 1 iff carry clear, i.e. roksum < limit	\
	 *          and the addition did not wrap. */			\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})

/* The access type is ignored on arm64; the range check covers both. */
#define access_ok(type, addr, size)	__range_ok(addr, size)
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
/*
 * Single user-space load: "instr" (ldrb/ldrh/ldr) with register width
 * prefix "reg" ("%w" for 32-bit, "%" for 64-bit).  Label 1 is the load
 * that may fault; the .fixup stub at label 3 sets err = -EFAULT and
 * x = 0, then resumes at label 2.  The __ex_table entry (1b, 3b) routes
 * a fault at the load to the stub.
 */
#define __get_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1: " instr " " reg "1, [%2]\n"					\
	"2:\n"								\
	" .section .fixup, \"ax\"\n"					\
	" .align 2\n"							\
	"3: mov %w0, %3\n"						\
	" mov %1, #0\n"							\
	" b 2b\n"							\
	" .previous\n"							\
	" .section __ex_table,\"a\"\n"					\
	" .align 3\n"							\
	" .quad 1b, 3b\n"						\
	" .previous"							\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
/*
 * Load *(ptr) from user space into (x), choosing the load width from
 * sizeof(*(ptr)).  On a fault, (err) is set to -EFAULT and (x) reads
 * as 0; on success (err) is left untouched.
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		/* "%" (not "%w") selects the full 64-bit x register */	\
		__get_user_asm("ldr", "%", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		/* unsupported access size: fail the build */		\
		BUILD_BUG();						\
	}								\
	/* narrow/convert the raw bits to the destination type */	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
/* Unchecked get: returns 0 on success or -EFAULT (caller did access_ok). */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

/* As __get_user, but accumulates the error into caller-provided (err). */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

/* arm64 loads tolerate misalignment here, so no separate implementation. */
#define __get_user_unaligned __get_user
/*
 * Checked get: validates the range with access_ok() first.  Returns 0
 * on success; on failure returns -EFAULT and zeroes (x) so the caller
 * never consumes an uninitialized value.  May sleep (page faults).
 */
#define get_user(x, ptr)						\
({									\
	might_sleep();							\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?			\
		__get_user((x), (ptr)) :				\
		((x) = 0, -EFAULT);					\
})
/*
 * Single user-space store: mirror of __get_user_asm.  The store at
 * label 1 may fault; the .fixup stub at label 3 sets err = -EFAULT and
 * resumes at label 2 (nothing to zero — the destination is user memory).
 */
#define __put_user_asm(instr, reg, x, addr, err)			\
	asm volatile(							\
	"1: " instr " " reg "1, [%2]\n"					\
	"2:\n"								\
	" .section .fixup,\"ax\"\n"					\
	" .align 2\n"							\
	"3: mov %w0, %3\n"						\
	" b 2b\n"							\
	" .previous\n"							\
	" .section __ex_table,\"a\"\n"					\
	" .align 3\n"							\
	" .quad 1b, 3b\n"						\
	" .previous"							\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))
/*
 * Store (x) to user space at (ptr), choosing the store width from
 * sizeof(*(ptr)).  On a fault, (err) is set to -EFAULT; on success it
 * is left untouched.
 */
#define __put_user_err(x, ptr, err)					\
do {									\
	/* evaluate (x) exactly once, with the pointee's type */	\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_user_asm("strh", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_user_asm("str", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		/* "%" (not "%w") selects the full 64-bit x register */	\
		__put_user_asm("str", "%", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		/* unsupported access size: fail the build */		\
		BUILD_BUG();						\
	}								\
} while (0)
/* Unchecked put: returns 0 on success or -EFAULT (caller did access_ok). */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

/* As __put_user, but accumulates the error into caller-provided (err). */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

/* arm64 stores tolerate misalignment here, so no separate implementation. */
#define __put_user_unaligned __put_user
/*
 * Checked put: validates the range with access_ok() first.  Returns 0
 * on success or -EFAULT.  May sleep (page faults).
 */
#define put_user(x, ptr)						\
({									\
	might_sleep();							\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__put_user((x), (ptr)) :				\
		-EFAULT;						\
})
/*
 * Raw, unchecked out-of-line primitives (implemented in assembly
 * elsewhere in the arch tree).  Address validation is the caller's
 * responsibility — use the checked wrappers below.  The copy/clear
 * helpers return the number of bytes left un-copied/un-cleared
 * (0 means complete success) — presumably, per the usual kernel
 * contract; confirm against the asm implementations.
 */
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
  216. static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
  217. {
  218. if (access_ok(VERIFY_READ, from, n))
  219. n = __copy_from_user(to, from, n);
  220. else /* security hole - plug it */
  221. memset(to, 0, n);
  222. return n;
  223. }
  224. static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
  225. {
  226. if (access_ok(VERIFY_WRITE, to, n))
  227. n = __copy_to_user(to, from, n);
  228. return n;
  229. }
  230. static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
  231. {
  232. if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
  233. n = __copy_in_user(to, from, n);
  234. return n;
  235. }
  236. #define __copy_to_user_inatomic __copy_to_user
  237. #define __copy_from_user_inatomic __copy_from_user
  238. static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
  239. {
  240. if (access_ok(VERIFY_WRITE, to, n))
  241. n = __clear_user(to, n);
  242. return n;
  243. }
  244. static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
  245. {
  246. long res = -EFAULT;
  247. if (access_ok(VERIFY_READ, src, 1))
  248. res = __strncpy_from_user(dst, src, count);
  249. return res;
  250. }
  251. #define strlen_user(s) strnlen_user(s, ~0UL >> 1)
  252. static inline long __must_check strnlen_user(const char __user *s, long n)
  253. {
  254. unsigned long res = 0;
  255. if (__addr_ok(s))
  256. res = __strnlen_user(s, n);
  257. return res;
  258. }
  259. #endif /* __ASM_UACCESS_H */