@@ -146,6 +146,24 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
 	return truncate_complete_page(mapping, page);
 }
 
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	if (!mapping)
+		return 0;
+	if (PageDirty(page) || PageWriteback(page))
+		return 0;
+	if (page_mapped(page))
+		return 0;
+	return invalidate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -312,12 +330,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 			if (lock_failed)
 				continue;
 
-			if (PageDirty(page) || PageWriteback(page))
-				goto unlock;
-			if (page_mapped(page))
-				goto unlock;
-			ret += invalidate_complete_page(mapping, page);
-unlock:
+			ret += invalidate_inode_page(page);
+
 			unlock_page(page);
 			if (next > end)
 				break;
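
Usage note (not part of the patch): invalidate_inode_page() expects its caller to hold the page lock and to release it afterwards, exactly as the reworked loop in invalidate_mapping_pages() does above. A minimal sketch of such a caller is shown below; try_drop_page() is a hypothetical name used only for illustration, and it assumes invalidate_inode_page() is declared in linux/mm.h.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch only: try to drop one clean pagecache page without
 * sleeping.  invalidate_inode_page() itself refuses dirty, mapped or
 * writeback pages, so this returns 1 only when the page was invalidated.
 */
static int try_drop_page(struct page *page)
{
	int ret;

	if (!trylock_page(page))
		return 0;	/* page lock contended, give up */

	ret = invalidate_inode_page(page);

	unlock_page(page);
	return ret;
}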