Browse Source

[XFS] use scalable vmap API

Implement XFS's large buffer support with the new vmap APIs. See the vmap
rewrite (db64fe02) for some numbers. The biggest improvement that comes from
using the new APIs is avoiding the global KVA allocation lock on every call.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Nick Piggin 16 năm trước
mục cha
commit
0087167c9d
1 tập tin đã thay đổi với 3 bổ sung và 3 xóa
  1. 3 3
      fs/xfs/linux-2.6/xfs_buf.c

+ 3 - 3
fs/xfs/linux-2.6/xfs_buf.c

@@ -264,7 +264,7 @@ xfs_buf_free(
 		uint		i;
 
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-                       vunmap(bp->b_addr - bp->b_offset);
+                       vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
@@ -386,8 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+               bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+                                       -1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;