|
@@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static gc_level_t inode_block_level(struct logfs_block *block)
|
|
|
-{
|
|
|
- BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
|
|
|
- return GC_LEVEL(LOGFS_MAX_LEVELS);
|
|
|
-}
|
|
|
-
|
|
|
-static gc_level_t indirect_block_level(struct logfs_block *block)
|
|
|
-{
|
|
|
- struct page *page;
|
|
|
- struct inode *inode;
|
|
|
- u64 bix;
|
|
|
- level_t level;
|
|
|
-
|
|
|
- page = block->page;
|
|
|
- inode = page->mapping->host;
|
|
|
- logfs_unpack_index(page->index, &bix, &level);
|
|
|
- return expand_level(inode->i_ino, level);
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* This silences a false, yet annoying gcc warning. I hate it when my editor
|
|
|
* jumps into bitops.h each time I recompile this file.
|
|
@@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb,
|
|
|
|
|
|
static struct logfs_block_ops inode_block_ops = {
|
|
|
.write_block = inode_write_block,
|
|
|
- .block_level = inode_block_level,
|
|
|
.free_block = inode_free_block,
|
|
|
.write_alias = inode_write_alias,
|
|
|
};
|
|
|
|
|
|
struct logfs_block_ops indirect_block_ops = {
|
|
|
.write_block = indirect_write_block,
|
|
|
- .block_level = indirect_block_level,
|
|
|
.free_block = indirect_free_block,
|
|
|
.write_alias = indirect_write_alias,
|
|
|
};
|
|
@@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
|
|
|
mempool_free(shadow, super->s_shadow_pool);
|
|
|
}
|
|
|
|
|
|
+static void mark_segment(struct shadow_tree *tree, u32 segno)
|
|
|
+{
|
|
|
+ int err;
|
|
|
+
|
|
|
+ if (!btree_lookup32(&tree->segment_map, segno)) {
|
|
|
+ err = btree_insert32(&tree->segment_map, segno, (void *)1,
|
|
|
+ GFP_NOFS);
|
|
|
+ BUG_ON(err);
|
|
|
+ tree->no_shadowed_segments++;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* fill_shadow_tree - Propagate shadow tree changes due to a write
|
|
|
* @inode: Inode owning the page
|
|
@@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
|
|
|
|
|
|
super->s_dirty_used_bytes += shadow->new_len;
|
|
|
super->s_dirty_free_bytes += shadow->old_len;
|
|
|
+ mark_segment(tree, shadow->old_ofs >> super->s_segshift);
|
|
|
+ mark_segment(tree, shadow->new_ofs >> super->s_segshift);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size)
|
|
|
return logfs_truncate_direct(inode, size);
|
|
|
}
|
|
|
|
|
|
-int logfs_truncate(struct inode *inode, u64 size)
|
|
|
+/*
|
|
|
+ * Truncate, by changing the segment file, can consume a fair amount
|
|
|
+ * of resources. So back off from time to time and do some GC.
|
|
|
+ * 8 or 2048 blocks should be well within safety limits even if
|
|
|
+ * every single block resided in a different segment.
|
|
|
+ */
|
|
|
+#define TRUNCATE_STEP (8 * 1024 * 1024)
|
|
|
+int logfs_truncate(struct inode *inode, u64 target)
|
|
|
{
|
|
|
struct super_block *sb = inode->i_sb;
|
|
|
- int err;
|
|
|
+ u64 size = i_size_read(inode);
|
|
|
+ int err = 0;
|
|
|
|
|
|
- logfs_get_wblocks(sb, NULL, 1);
|
|
|
- err = __logfs_truncate(inode, size);
|
|
|
- if (!err)
|
|
|
- err = __logfs_write_inode(inode, 0);
|
|
|
- logfs_put_wblocks(sb, NULL, 1);
|
|
|
+ size = ALIGN(size, TRUNCATE_STEP);
|
|
|
+ while (size > target) {
|
|
|
+ if (size > TRUNCATE_STEP)
|
|
|
+ size -= TRUNCATE_STEP;
|
|
|
+ else
|
|
|
+ size = 0;
|
|
|
+ if (size < target)
|
|
|
+ size = target;
|
|
|
+
|
|
|
+ logfs_get_wblocks(sb, NULL, 1);
|
|
|
+		err = __logfs_truncate(inode, size);
|
|
|
+ if (!err)
|
|
|
+ err = __logfs_write_inode(inode, 0);
|
|
|
+ logfs_put_wblocks(sb, NULL, 1);
|
|
|
+ }
|
|
|
|
|
|
if (!err)
|
|
|
- err = vmtruncate(inode, size);
|
|
|
+ err = vmtruncate(inode, target);
|
|
|
|
|
|
/* I don't trust error recovery yet. */
|
|
|
WARN_ON(err);
|
|
@@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb)
|
|
|
struct logfs_super *super = logfs_super(sb);
|
|
|
|
|
|
destroy_meta_inode(super->s_segfile_inode);
|
|
|
- if (super->s_block_pool)
|
|
|
- mempool_destroy(super->s_block_pool);
|
|
|
- if (super->s_shadow_pool)
|
|
|
- mempool_destroy(super->s_shadow_pool);
|
|
|
+ logfs_mempool_destroy(super->s_block_pool);
|
|
|
+ logfs_mempool_destroy(super->s_shadow_pool);
|
|
|
}
|