@@ -426,7 +426,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 		 */
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK))
-		 	ret = __put_user(0, end);
+			ret = __put_user(0, end);
 	}
 	return ret;
 }
@@ -445,13 +445,73 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK)) {
-		 	ret = __get_user(c, end);
+			ret = __get_user(c, end);
 			(void)c;
 		}
 	}
 	return ret;
 }
 
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	/*
+	 * Writing zeroes into userspace here is OK, because we know that if
+	 * the zero gets there, we'll be overwriting it.
+	 */
+	while (uaddr <= end) {
+		ret = __put_user(0, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+			((unsigned long)end & PAGE_MASK))
+		ret = __put_user(0, end);
+
+	return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+					       int size)
+{
+	volatile char c;
+	int ret;
+	const char __user *end = uaddr + size - 1;
+
+	if (unlikely(size == 0))
+		return 0;
+
+	while (uaddr <= end) {
+		ret = __get_user(c, uaddr);
+		if (ret != 0)
+			return ret;
+		uaddr += PAGE_SIZE;
+	}
+
+	/* Check whether the range spilled into the next page. */
+	if (((unsigned long)uaddr & PAGE_MASK) ==
+			((unsigned long)end & PAGE_MASK)) {
+		ret = __get_user(c, end);
+		(void)c;
+	}
+
+	return ret;
+}
+
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 				pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
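
For context, here is a sketch of the usage pattern these helpers enable. This is an illustration, not part of the patch: my_object_pwrite(), dst, user_data and remain are invented names. The idea is to prefault the whole user range up front, while sleeping is still allowed, and then do the real copy with page faults disabled once locks are held:

#include <linux/pagemap.h>
#include <linux/uaccess.h>

/* Hypothetical caller: prefault first, then copy atomically under locks. */
static int my_object_pwrite(void *dst, const char __user *user_data,
			    int remain)
{
	unsigned long unwritten;

	/* May sleep faulting pages in, so call before taking any locks. */
	if (fault_in_multipages_readable(user_data, remain))
		return -EFAULT;

	/* ... acquire spinlocks or atomic kmaps here ... */

	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, user_data, remain);
	pagefault_enable();

	/* ... release locks ... */

	/*
	 * The prefaulted pages can still be reclaimed before the copy runs;
	 * in that case the inatomic copy returns nonzero instead of faulting,
	 * and a real caller would fall back to a sleeping slow path.
	 */
	return unwritten ? -EFAULT : 0;
}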