|
@@ -239,12 +239,47 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
|
|
|
do_release_stripe(conf, sh);
|
|
|
}
|
|
|
|
|
|
+/* should hold conf->device_lock already */
|
|
|
+static int release_stripe_list(struct r5conf *conf)
|
|
|
+{
|
|
|
+ struct stripe_head *sh;
|
|
|
+ int count = 0;
|
|
|
+ struct llist_node *head;
|
|
|
+
|
|
|
+ head = llist_del_all(&conf->released_stripes);
|
|
|
+ while (head) {
|
|
|
+ sh = llist_entry(head, struct stripe_head, release_list);
|
|
|
+ head = llist_next(head);
|
|
|
+ /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
|
|
|
+ smp_mb();
|
|
|
+ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
|
|
|
+ /*
|
|
|
+ * Don't worry if the bit is set here, because if the bit is set
|
|
|
+ * again, the count is always > 1. This is true for
|
|
|
+ * STRIPE_ON_UNPLUG_LIST bit too.
|
|
|
+ */
|
|
|
+ __release_stripe(conf, sh);
|
|
|
+ count++;
|
|
|
+ }
|
|
|
+
|
|
|
+ return count;
|
|
|
+}
|
|
|
+
|
|
|
static void release_stripe(struct stripe_head *sh)
|
|
|
{
|
|
|
struct r5conf *conf = sh->raid_conf;
|
|
|
unsigned long flags;
|
|
|
+ bool wakeup;
|
|
|
|
|
|
+ if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
|
|
|
+ goto slow_path;
|
|
|
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
|
|
|
+ if (wakeup)
|
|
|
+ md_wakeup_thread(conf->mddev->thread);
|
|
|
+ return;
|
|
|
+slow_path:
|
|
|
local_irq_save(flags);
|
|
|
+ /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
|
|
|
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
|
|
|
do_release_stripe(conf, sh);
|
|
|
spin_unlock(&conf->device_lock);
|
|
@@ -491,7 +526,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
|
|
if (atomic_read(&sh->count)) {
|
|
|
BUG_ON(!list_empty(&sh->lru)
|
|
|
&& !test_bit(STRIPE_EXPANDING, &sh->state)
|
|
|
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
|
|
|
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
|
|
|
+ && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
|
|
|
} else {
|
|
|
if (!test_bit(STRIPE_HANDLE, &sh->state))
|
|
|
atomic_inc(&conf->active_stripes);
|
|
@@ -4127,6 +4163,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
|
|
|
*/
|
|
|
smp_mb__before_clear_bit();
|
|
|
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
|
|
|
+ /*
|
|
|
+ * STRIPE_ON_RELEASE_LIST could be set here. In that
|
|
|
+ * case, the count is always > 1 here
|
|
|
+ */
|
|
|
__release_stripe(conf, sh);
|
|
|
cnt++;
|
|
|
}
|
|
@@ -4836,7 +4876,9 @@ static void raid5d(struct md_thread *thread)
|
|
|
spin_lock_irq(&conf->device_lock);
|
|
|
while (1) {
|
|
|
struct bio *bio;
|
|
|
- int batch_size;
|
|
|
+ int batch_size, released;
|
|
|
+
|
|
|
+ released = release_stripe_list(conf);
|
|
|
|
|
|
if (
|
|
|
!list_empty(&conf->bitmap_list)) {
|
|
@@ -4861,7 +4903,7 @@ static void raid5d(struct md_thread *thread)
|
|
|
}
|
|
|
|
|
|
batch_size = handle_active_stripes(conf);
|
|
|
- if (!batch_size)
|
|
|
+ if (!batch_size && !released)
|
|
|
break;
|
|
|
handled += batch_size;
|
|
|
|
|
@@ -5176,6 +5218,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
|
|
|
INIT_LIST_HEAD(&conf->delayed_list);
|
|
|
INIT_LIST_HEAD(&conf->bitmap_list);
|
|
|
INIT_LIST_HEAD(&conf->inactive_list);
|
|
|
+ init_llist_head(&conf->released_stripes);
|
|
|
atomic_set(&conf->active_stripes, 0);
|
|
|
atomic_set(&conf->preread_active_stripes, 0);
|
|
|
atomic_set(&conf->active_aligned_reads, 0);
|