/* uaccess_32.h — x86 32-bit user space memory access functions */
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
  22. /**
  23. * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
  24. * @to: Destination address, in user space.
  25. * @from: Source address, in kernel space.
  26. * @n: Number of bytes to copy.
  27. *
  28. * Context: User context only.
  29. *
  30. * Copy data from kernel space to user space. Caller must check
  31. * the specified block with access_ok() before calling this function.
  32. * The caller should also make sure he pins the user space address
  33. * so that the we don't result in page fault and sleep.
  34. *
  35. * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
  36. * we return the initial request size (1, 2 or 4), as copy_*_user should do.
  37. * If a store crosses a page boundary and gets a fault, the x86 will not write
  38. * anything, so this is accurate.
  39. */
  40. static __always_inline unsigned long __must_check
  41. __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
  42. {
  43. if (__builtin_constant_p(n)) {
  44. unsigned long ret;
  45. switch (n) {
  46. case 1:
  47. __put_user_size(*(u8 *)from, (u8 __user *)to,
  48. 1, ret, 1);
  49. return ret;
  50. case 2:
  51. __put_user_size(*(u16 *)from, (u16 __user *)to,
  52. 2, ret, 2);
  53. return ret;
  54. case 4:
  55. __put_user_size(*(u32 *)from, (u32 __user *)to,
  56. 4, ret, 4);
  57. return ret;
  58. }
  59. }
  60. return __copy_to_user_ll(to, from, n);
  61. }
  62. /**
  63. * __copy_to_user: - Copy a block of data into user space, with less checking.
  64. * @to: Destination address, in user space.
  65. * @from: Source address, in kernel space.
  66. * @n: Number of bytes to copy.
  67. *
  68. * Context: User context only. This function may sleep.
  69. *
  70. * Copy data from kernel space to user space. Caller must check
  71. * the specified block with access_ok() before calling this function.
  72. *
  73. * Returns number of bytes that could not be copied.
  74. * On success, this will be zero.
  75. */
  76. static __always_inline unsigned long __must_check
  77. __copy_to_user(void __user *to, const void *from, unsigned long n)
  78. {
  79. might_fault();
  80. return __copy_to_user_inatomic(to, from, n);
  81. }
  82. static __always_inline unsigned long
  83. __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
  84. {
  85. /* Avoid zeroing the tail if the copy fails..
  86. * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
  87. * but as the zeroing behaviour is only significant when n is not
  88. * constant, that shouldn't be a problem.
  89. */
  90. if (__builtin_constant_p(n)) {
  91. unsigned long ret;
  92. switch (n) {
  93. case 1:
  94. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  95. return ret;
  96. case 2:
  97. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  98. return ret;
  99. case 4:
  100. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  101. return ret;
  102. }
  103. }
  104. return __copy_from_user_ll_nozero(to, from, n);
  105. }
  106. /**
  107. * __copy_from_user: - Copy a block of data from user space, with less checking.
  108. * @to: Destination address, in kernel space.
  109. * @from: Source address, in user space.
  110. * @n: Number of bytes to copy.
  111. *
  112. * Context: User context only. This function may sleep.
  113. *
  114. * Copy data from user space to kernel space. Caller must check
  115. * the specified block with access_ok() before calling this function.
  116. *
  117. * Returns number of bytes that could not be copied.
  118. * On success, this will be zero.
  119. *
  120. * If some data could not be copied, this function will pad the copied
  121. * data to the requested size using zero bytes.
  122. *
  123. * An alternate version - __copy_from_user_inatomic() - may be called from
  124. * atomic context and will fail rather than sleep. In this case the
  125. * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
  126. * for explanation of why this is needed.
  127. */
  128. static __always_inline unsigned long
  129. __copy_from_user(void *to, const void __user *from, unsigned long n)
  130. {
  131. might_fault();
  132. if (__builtin_constant_p(n)) {
  133. unsigned long ret;
  134. switch (n) {
  135. case 1:
  136. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  137. return ret;
  138. case 2:
  139. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  140. return ret;
  141. case 4:
  142. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  143. return ret;
  144. }
  145. }
  146. return __copy_from_user_ll(to, from, n);
  147. }
  148. static __always_inline unsigned long __copy_from_user_nocache(void *to,
  149. const void __user *from, unsigned long n)
  150. {
  151. might_fault();
  152. if (__builtin_constant_p(n)) {
  153. unsigned long ret;
  154. switch (n) {
  155. case 1:
  156. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  157. return ret;
  158. case 2:
  159. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  160. return ret;
  161. case 4:
  162. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  163. return ret;
  164. }
  165. }
  166. return __copy_from_user_ll_nocache(to, from, n);
  167. }
  168. static __always_inline unsigned long
  169. __copy_from_user_inatomic_nocache(void *to, const void __user *from,
  170. unsigned long n)
  171. {
  172. return __copy_from_user_ll_nocache_nozero(to, from, n);
  173. }
unsigned long __must_check copy_to_user(void __user *to,
					const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
				    long count);
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)

long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_X86_UACCESS_32_H */