@@ -188,29 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
-static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-				unsigned size, unsigned long total)
+static inline int
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
 	might_sleep();
-	/*
-	 * In practice this limit means that large file write()s
-	 * which get chunked to 4K copies get handled via
-	 * non-temporal stores here. Smaller writes get handled
-	 * via regular __copy_from_user():
-	 */
-	if (likely(total >= PAGE_SIZE))
-		return __copy_user_nocache(dst, src, size, 1);
-	else
-		return __copy_from_user(dst, src, size);
+	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int __copy_from_user_inatomic_nocache(void *dst,
-				const void __user *src, unsigned size, unsigned total)
+static inline int
+__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+				  unsigned size)
 {
-	if (likely(total >= PAGE_SIZE))
-		return __copy_user_nocache(dst, src, size, 0);
-	else
-		return __copy_from_user_inatomic(dst, src, size);
+	return __copy_user_nocache(dst, src, size, 0);
 }
 
 unsigned long