uaccess_64.h

#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
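
/*
 * The __copy_* inline variants below assume the caller has already done
 * access_ok() on the user pointers.  Compile-time constant sizes are
 * open-coded with __get_user_asm()/__put_user_asm(); everything else
 * falls back to copy_user_generic().
 */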
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
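	/*
	 * The 10- and 16-byte cases are done as an 8-byte access plus
	 * the remainder, with a fault check in between.
	 */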
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
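
/*
 * Kernel-to-user counterpart of __copy_from_user(); fixed sizes are
 * open-coded with __put_user_asm(), with a compiler barrier between the
 * two halves of the 10- and 16-byte cases.
 */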
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
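
/*
 * Copy from one user address to another, staging each item in a kernel
 * register; neither pointer is checked with access_ok() here.
 */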
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
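
/*
 * The *_inatomic variants are meant for callers that cannot sleep
 * (e.g. with pagefaults disabled), so they do not call might_fault().
 */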
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					     unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
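
/*
 * Non-caching copies: the out-of-line __copy_user_nocache() avoids
 * polluting the CPU caches (implemented with non-temporal stores).
 * zerorest selects whether the uncopied tail of the destination is
 * cleared when the copy faults.
 */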
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
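
/*
 * Fault fixup helper used by the out-of-line copy routines: retries the
 * remaining bytes one at a time and, when zerorest is set, clears what
 * could not be copied; returns the number of bytes left uncopied.
 */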
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */