|
@@ -946,6 +946,85 @@ out:
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * lock the group_info alloc_sem of all the groups
|
|
|
+ * belonging to the same buddy cache page. This
|
|
|
+ * makes sure other parallel operations on the buddy
|
|
|
+ * cache don't happen while holding the buddy cache
|
|
|
+ * lock
|
|
|
+ */
|
|
|
+static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
|
|
|
+ ext4_group_t group)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+ int block, pnum;
|
|
|
+ int blocks_per_page;
|
|
|
+ int groups_per_page;
|
|
|
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
|
|
|
+ ext4_group_t first_group;
|
|
|
+ struct ext4_group_info *grp;
|
|
|
+
|
|
|
+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
|
|
|
+ /*
|
|
|
+ * the buddy cache inode stores the block bitmap
|
|
|
+ * and buddy information in consecutive blocks.
|
|
|
+ * So for each group we need two blocks.
|
|
|
+ */
|
|
|
+ block = group * 2;
|
|
|
+ pnum = block / blocks_per_page;
|
|
|
+ first_group = pnum * blocks_per_page / 2;
|
|
|
+
|
|
|
+ groups_per_page = blocks_per_page >> 1;
|
|
|
+ if (groups_per_page == 0)
|
|
|
+ groups_per_page = 1;
|
|
|
+ /* read all groups the page covers into the cache */
|
|
|
+ for (i = 0; i < groups_per_page; i++) {
|
|
|
+
|
|
|
+ if ((first_group + i) >= ngroups)
|
|
|
+ break;
|
|
|
+ grp = ext4_get_group_info(sb, first_group + i);
|
|
|
+ /* take all groups write allocation
|
|
|
+ * semaphore. This makes sure there is
|
|
|
+ * no block allocation going on in any
|
|
|
+ * of those groups
|
|
|
+ */
|
|
|
+ down_write_nested(&grp->alloc_sem, i);
|
|
|
+ }
|
|
|
+ return i;
|
|
|
+}
|
|
|
+
|
|
|
+static void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
|
|
|
+ ext4_group_t group, int locked_group)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+ int block, pnum;
|
|
|
+ int blocks_per_page;
|
|
|
+ ext4_group_t first_group;
|
|
|
+ struct ext4_group_info *grp;
|
|
|
+
|
|
|
+ blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
|
|
|
+ /*
|
|
|
+ * the buddy cache inode stores the block bitmap
|
|
|
+ * and buddy information in consecutive blocks.
|
|
|
+ * So for each group we need two blocks.
|
|
|
+ */
|
|
|
+ block = group * 2;
|
|
|
+ pnum = block / blocks_per_page;
|
|
|
+ first_group = pnum * blocks_per_page / 2;
|
|
|
+ /* release locks on all the groups */
|
|
|
+ for (i = 0; i < locked_group; i++) {
|
|
|
+
|
|
|
+ grp = ext4_get_group_info(sb, first_group + i);
|
|
|
+ /* release the write allocation
|
|
|
+ * semaphore taken earlier by
|
|
|
+ * ext4_mb_get_buddy_cache_lock();
|
|
|
+ * block allocation may now resume
|
|
|
+ */
|
|
|
+ up_write(&grp->alloc_sem);
|
|
|
+ }
|
|
|
+
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Locking note: This routine calls ext4_mb_init_cache(), which takes the
|
|
|
* block group lock of all groups for this page; do not hold the BG lock when
|
|
@@ -1923,84 +2002,6 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * lock the group_info alloc_sem of all the groups
|
|
|
- * belonging to the same buddy cache page. This
|
|
|
- * make sure other parallel operation on the buddy
|
|
|
- * cache doesn't happen whild holding the buddy cache
|
|
|
- * lock
|
|
|
- */
|
|
|
-int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
|
|
|
-{
|
|
|
- int i;
|
|
|
- int block, pnum;
|
|
|
- int blocks_per_page;
|
|
|
- int groups_per_page;
|
|
|
- ext4_group_t ngroups = ext4_get_groups_count(sb);
|
|
|
- ext4_group_t first_group;
|
|
|
- struct ext4_group_info *grp;
|
|
|
-
|
|
|
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
|
|
|
- /*
|
|
|
- * the buddy cache inode stores the block bitmap
|
|
|
- * and buddy information in consecutive blocks.
|
|
|
- * So for each group we need two blocks.
|
|
|
- */
|
|
|
- block = group * 2;
|
|
|
- pnum = block / blocks_per_page;
|
|
|
- first_group = pnum * blocks_per_page / 2;
|
|
|
-
|
|
|
- groups_per_page = blocks_per_page >> 1;
|
|
|
- if (groups_per_page == 0)
|
|
|
- groups_per_page = 1;
|
|
|
- /* read all groups the page covers into the cache */
|
|
|
- for (i = 0; i < groups_per_page; i++) {
|
|
|
-
|
|
|
- if ((first_group + i) >= ngroups)
|
|
|
- break;
|
|
|
- grp = ext4_get_group_info(sb, first_group + i);
|
|
|
- /* take all groups write allocation
|
|
|
- * semaphore. This make sure there is
|
|
|
- * no block allocation going on in any
|
|
|
- * of that groups
|
|
|
- */
|
|
|
- down_write_nested(&grp->alloc_sem, i);
|
|
|
- }
|
|
|
- return i;
|
|
|
-}
|
|
|
-
|
|
|
-void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
|
|
|
- ext4_group_t group, int locked_group)
|
|
|
-{
|
|
|
- int i;
|
|
|
- int block, pnum;
|
|
|
- int blocks_per_page;
|
|
|
- ext4_group_t first_group;
|
|
|
- struct ext4_group_info *grp;
|
|
|
-
|
|
|
- blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
|
|
|
- /*
|
|
|
- * the buddy cache inode stores the block bitmap
|
|
|
- * and buddy information in consecutive blocks.
|
|
|
- * So for each group we need two blocks.
|
|
|
- */
|
|
|
- block = group * 2;
|
|
|
- pnum = block / blocks_per_page;
|
|
|
- first_group = pnum * blocks_per_page / 2;
|
|
|
- /* release locks on all the groups */
|
|
|
- for (i = 0; i < locked_group; i++) {
|
|
|
-
|
|
|
- grp = ext4_get_group_info(sb, first_group + i);
|
|
|
- /* take all groups write allocation
|
|
|
- * semaphore. This make sure there is
|
|
|
- * no block allocation going on in any
|
|
|
- * of that groups
|
|
|
- */
|
|
|
- up_write(&grp->alloc_sem);
|
|
|
- }
|
|
|
-
|
|
|
-}
|
|
|
-
|
|
|
static noinline_for_stack int
|
|
|
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
|
|
{
|