@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
 #define xfs_buf_deallocate(bp) \
 	kmem_zone_free(xfs_buf_zone, (bp));
 
+static inline int
+xfs_buf_is_vmapped(
+	struct xfs_buf	*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped.
+	 *
+	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+	 * code is clever enough to know it doesn't have to map a single page,
+	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+	struct xfs_buf	*bp)
+{
+	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  * Page Region interfaces.
  *
@@ -314,7 +335,7 @@ xfs_buf_free(
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
 			free_address(bp->b_addr - bp->b_offset);
 
 		for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(
 
 	xfs_buf_ioerror(bp, -error);
 
+	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
 	do {
 		struct page	*page = bvec->bv_page;
 
@@ -1216,6 +1240,10 @@ next_chunk:
 
 submit_io:
 	if (likely(bio->bi_size)) {
+		if (xfs_buf_is_vmapped(bp)) {
+			flush_kernel_vmap_range(bp->b_addr,
+						xfs_buf_vmap_len(bp));
+		}
 		submit_bio(rw, bio);
 		if (size)
 			goto next_chunk;