
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: don't warn about page discards on shutdown
  xfs: use scalable vmap API
  xfs: remove old vmap cache
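The xfs_buf.c side of this merge swaps the old vmap()/vunmap() batching cache for the scalable vmap API. Below is a minimal sketch of that API as the patch uses it, assuming the 2.6.33-era signatures (vm_map_ram() lost its pgprot_t argument in later kernels); the wrapper names and parameters are hypothetical, not part of the patch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map a buffer's pages into one contiguous kernel virtual range. */
static void *map_buffer(struct page **pages, unsigned int page_count)
{
	/* node == -1 means no NUMA node preference for the vmap block */
	return vm_map_ram(pages, page_count, -1, PAGE_KERNEL);
}

/* Unmap it again; the count must match the vm_map_ram() call. */
static void unmap_buffer(void *addr, unsigned int page_count)
{
	vm_unmap_ram(addr, page_count);
}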
Linus Torvalds, 15 years ago
parent commit 01d61d0d64
2 changed files with 14 additions and 80 deletions:
  1. fs/xfs/linux-2.6/xfs_aops.c (+10 -3)
  2. fs/xfs/linux-2.6/xfs_buf.c (+4 -77)

+ 10 - 3
fs/xfs/linux-2.6/xfs_aops.c

@@ -932,6 +932,9 @@ xfs_aops_discard_page(
 	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
 		goto out_invalidate;
 
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
 	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 		"page discard on page %p, inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
@@ -964,8 +967,10 @@ xfs_aops_discard_page(
 
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
-			"page discard failed delalloc mapping lookup.");
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				"page discard failed delalloc mapping lookup.");
+			}
 			break;
 		}
 		if (!nimaps) {
@@ -991,8 +996,10 @@ xfs_aops_discard_page(
 		ASSERT(!flist.xbf_count && !flist.xbf_first);
 		if (error) {
 			/* something screwed, just bail */
-			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
 			"page discard unable to remove delalloc mapping.");
+			}
 			break;
 		}
 next_buffer:
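The xfs_aops.c hunks above all apply the same rule: after a forced shutdown, discarding dirty delalloc pages is the expected outcome, so alerting on every page would only flood the log. A minimal sketch of the guard pattern, assuming an XFS inode pointer ip is in scope:

	/* Entry check: nothing worth reporting once the fs is dead. */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	/* Error paths: still bail, but only alert on a live filesystem. */
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount))
			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
				"page discard failed delalloc mapping lookup.");
		break;
	}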

+ 4 - 77
fs/xfs/linux-2.6/xfs_buf.c

@@ -167,75 +167,6 @@ test_page_region(
 	return (mask && (page_private(page) & mask) == mask);
 }
 
-/*
- *	Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *	Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
 /*
  *	Internal xfs_buf_t object manipulation
  */
@@ -337,7 +268,8 @@ xfs_buf_free(
 		uint		i;
 
 		if (xfs_buf_is_vmapped(bp))
-			free_address(bp->b_addr - bp->b_offset);
+			vm_unmap_ram(bp->b_addr - bp->b_offset,
+					bp->b_page_count);
 
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
@@ -457,10 +389,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1955,9 +1885,6 @@ xfsbufd(
 			xfs_buf_iostrategy(bp);
 			count++;
 		}
-
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
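A note on the design choice behind the deleted block: the free_address()/purge_addresses() pair existed because vunmap() can trigger a global TLB flush, so XFS queued addresses on a private list to amortize that cost (with a CONFIG_XEN escape hatch that unmapped eagerly). vm_unmap_ram() moves that batching into the vmalloc layer itself, which is why both the cache and the periodic purge in xfsbufd() can go. A hedged before/after sketch, with hypothetical wrapper names:

#include <linux/vmalloc.h>

/* Old approach: each vunmap() may cost a global TLB flush, hence
 * the hand-rolled free list this merge removes. */
static void unmap_old(void *addr)
{
	vunmap(addr);
}

/* New approach: the range is queued for lazy unmapping, and the
 * vmalloc layer flushes many ranges with one TLB operation. */
static void unmap_new(void *addr, unsigned int page_count)
{
	vm_unmap_ram(addr, page_count);
}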