|
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
|
|
|
EXPORT_SYMBOL(sync_page_range);
|
|
|
|
|
|
/**
|
|
|
- * sync_page_range_nolock
|
|
|
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
|
|
|
* @inode: target inode
|
|
|
* @mapping: target address_space
|
|
|
* @pos: beginning offset in pages to write
|
|
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
|
|
|
sync_page_killable, TASK_KILLABLE);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
+/**
|
|
|
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
|
|
|
+ * @page: the page to lock
|
|
|
+ *
|
|
|
* Variant of lock_page that does not require the caller to hold a reference
|
|
|
* on the page's mapping.
|
|
|
*/
|
|
@@ -1538,9 +1541,20 @@ repeat:
|
|
|
return page;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
+/**
|
|
|
+ * read_cache_page_async - read into page cache, fill it if needed
|
|
|
+ * @mapping: the page's address_space
|
|
|
+ * @index: the page index
|
|
|
+ * @filler: function to perform the read
|
|
|
+ * @data: first argument passed to @filler when filling the page (not a destination buffer)
|
|
|
+ *
|
|
|
* Same as read_cache_page, but don't wait for page to become unlocked
|
|
|
* after submitting it to the filler.
|
|
|
+ *
|
|
|
+ * Read into the page cache. If a page already exists, and PageUptodate() is
|
|
|
+ * not set, try to fill the page but don't wait for it to become unlocked.
|
|
|
+ *
|
|
|
+ * If the page does not get brought uptodate, return -EIO.
|
|
|
*/
|
|
|
struct page *read_cache_page_async(struct address_space *mapping,
|
|
|
pgoff_t index,
|