/* arch/x86: uaccess_32.h — i386 user-space memory access helpers */
  1. #ifndef __i386_UACCESS_H
  2. #define __i386_UACCESS_H
  3. /*
  4. * User space memory access functions
  5. */
  6. #include <linux/errno.h>
  7. #include <linux/thread_info.h>
  8. #include <linux/prefetch.h>
  9. #include <linux/string.h>
  10. #include <asm/asm.h>
  11. #include <asm/page.h>
  12. unsigned long __must_check __copy_to_user_ll
  13. (void __user *to, const void *from, unsigned long n);
  14. unsigned long __must_check __copy_from_user_ll
  15. (void *to, const void __user *from, unsigned long n);
  16. unsigned long __must_check __copy_from_user_ll_nozero
  17. (void *to, const void __user *from, unsigned long n);
  18. unsigned long __must_check __copy_from_user_ll_nocache
  19. (void *to, const void __user *from, unsigned long n);
  20. unsigned long __must_check __copy_from_user_ll_nocache_nozero
  21. (void *to, const void __user *from, unsigned long n);
  22. /**
  23. * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
  24. * @to: Destination address, in user space.
  25. * @from: Source address, in kernel space.
  26. * @n: Number of bytes to copy.
  27. *
  28. * Context: User context only.
  29. *
  30. * Copy data from kernel space to user space. Caller must check
  31. * the specified block with access_ok() before calling this function.
  32. * The caller should also make sure he pins the user space address
  33. * so that the we don't result in page fault and sleep.
  34. *
  35. * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
  36. * we return the initial request size (1, 2 or 4), as copy_*_user should do.
  37. * If a store crosses a page boundary and gets a fault, the x86 will not write
  38. * anything, so this is accurate.
  39. */
  40. static __always_inline unsigned long __must_check
  41. __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
  42. {
  43. if (__builtin_constant_p(n)) {
  44. unsigned long ret;
  45. switch (n) {
  46. case 1:
  47. __put_user_size(*(u8 *)from, (u8 __user *)to,
  48. 1, ret, 1);
  49. return ret;
  50. case 2:
  51. __put_user_size(*(u16 *)from, (u16 __user *)to,
  52. 2, ret, 2);
  53. return ret;
  54. case 4:
  55. __put_user_size(*(u32 *)from, (u32 __user *)to,
  56. 4, ret, 4);
  57. return ret;
  58. }
  59. }
  60. return __copy_to_user_ll(to, from, n);
  61. }
  62. /**
  63. * __copy_to_user: - Copy a block of data into user space, with less checking.
  64. * @to: Destination address, in user space.
  65. * @from: Source address, in kernel space.
  66. * @n: Number of bytes to copy.
  67. *
  68. * Context: User context only. This function may sleep.
  69. *
  70. * Copy data from kernel space to user space. Caller must check
  71. * the specified block with access_ok() before calling this function.
  72. *
  73. * Returns number of bytes that could not be copied.
  74. * On success, this will be zero.
  75. */
  76. static __always_inline unsigned long __must_check
  77. __copy_to_user(void __user *to, const void *from, unsigned long n)
  78. {
  79. might_sleep();
  80. if (current->mm)
  81. might_lock_read(&current->mm->mmap_sem);
  82. return __copy_to_user_inatomic(to, from, n);
  83. }
  84. static __always_inline unsigned long
  85. __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
  86. {
  87. /* Avoid zeroing the tail if the copy fails..
  88. * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
  89. * but as the zeroing behaviour is only significant when n is not
  90. * constant, that shouldn't be a problem.
  91. */
  92. if (__builtin_constant_p(n)) {
  93. unsigned long ret;
  94. switch (n) {
  95. case 1:
  96. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  97. return ret;
  98. case 2:
  99. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  100. return ret;
  101. case 4:
  102. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  103. return ret;
  104. }
  105. }
  106. return __copy_from_user_ll_nozero(to, from, n);
  107. }
  108. /**
  109. * __copy_from_user: - Copy a block of data from user space, with less checking.
  110. * @to: Destination address, in kernel space.
  111. * @from: Source address, in user space.
  112. * @n: Number of bytes to copy.
  113. *
  114. * Context: User context only. This function may sleep.
  115. *
  116. * Copy data from user space to kernel space. Caller must check
  117. * the specified block with access_ok() before calling this function.
  118. *
  119. * Returns number of bytes that could not be copied.
  120. * On success, this will be zero.
  121. *
  122. * If some data could not be copied, this function will pad the copied
  123. * data to the requested size using zero bytes.
  124. *
  125. * An alternate version - __copy_from_user_inatomic() - may be called from
  126. * atomic context and will fail rather than sleep. In this case the
  127. * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
  128. * for explanation of why this is needed.
  129. */
  130. static __always_inline unsigned long
  131. __copy_from_user(void *to, const void __user *from, unsigned long n)
  132. {
  133. might_sleep();
  134. if (current->mm)
  135. might_lock_read(&current->mm->mmap_sem);
  136. if (__builtin_constant_p(n)) {
  137. unsigned long ret;
  138. switch (n) {
  139. case 1:
  140. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  141. return ret;
  142. case 2:
  143. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  144. return ret;
  145. case 4:
  146. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  147. return ret;
  148. }
  149. }
  150. return __copy_from_user_ll(to, from, n);
  151. }
  152. static __always_inline unsigned long __copy_from_user_nocache(void *to,
  153. const void __user *from, unsigned long n)
  154. {
  155. might_sleep();
  156. if (current->mm)
  157. might_lock_read(&current->mm->mmap_sem);
  158. if (__builtin_constant_p(n)) {
  159. unsigned long ret;
  160. switch (n) {
  161. case 1:
  162. __get_user_size(*(u8 *)to, from, 1, ret, 1);
  163. return ret;
  164. case 2:
  165. __get_user_size(*(u16 *)to, from, 2, ret, 2);
  166. return ret;
  167. case 4:
  168. __get_user_size(*(u32 *)to, from, 4, ret, 4);
  169. return ret;
  170. }
  171. }
  172. return __copy_from_user_ll_nocache(to, from, n);
  173. }
  174. static __always_inline unsigned long
  175. __copy_from_user_inatomic_nocache(void *to, const void __user *from,
  176. unsigned long n)
  177. {
  178. return __copy_from_user_ll_nocache_nozero(to, from, n);
  179. }
  180. unsigned long __must_check copy_to_user(void __user *to,
  181. const void *from, unsigned long n);
  182. unsigned long __must_check copy_from_user(void *to,
  183. const void __user *from,
  184. unsigned long n);
  185. long __must_check strncpy_from_user(char *dst, const char __user *src,
  186. long count);
  187. long __must_check __strncpy_from_user(char *dst,
  188. const char __user *src, long count);
  189. /**
  190. * strlen_user: - Get the size of a string in user space.
  191. * @str: The string to measure.
  192. *
  193. * Context: User context only. This function may sleep.
  194. *
  195. * Get the size of a NUL-terminated string in user space.
  196. *
  197. * Returns the size of the string INCLUDING the terminating NUL.
  198. * On exception, returns 0.
  199. *
  200. * If there is a limit on the length of a valid string, you may wish to
  201. * consider using strnlen_user() instead.
  202. */
  203. #define strlen_user(str) strnlen_user(str, LONG_MAX)
  204. long strnlen_user(const char __user *str, long n);
  205. unsigned long __must_check clear_user(void __user *mem, unsigned long len);
  206. unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
  207. #endif /* __i386_UACCESS_H */