@@ -445,6 +445,61 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on. This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
+ */
+struct md_plug_cb {
+	struct blk_plug_cb cb;
+	mddev_t *mddev;
+};
+
+static void plugger_unplug(struct blk_plug_cb *cb)
+{
+	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+		md_wakeup_thread(mdcb->mddev->thread);
+	kfree(mdcb);
+}
+
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
+{
+	struct blk_plug *plug = current->plug;
+	struct md_plug_cb *mdcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+		if (mdcb->cb.callback == plugger_unplug &&
+		    mdcb->mddev == mddev) {
+			/* Already on the list, move to top */
+			if (mdcb != list_first_entry(&plug->cb_list,
+						     struct md_plug_cb,
+						     cb.list))
+				list_move(&mdcb->cb.list, &plug->cb_list);
+			return 1;
+		}
+	}
+	/* Not currently on the callback list */
+	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+	if (!mdcb)
+		return 0;
+
+	mdcb->mddev = mddev;
+	mdcb->cb.callback = plugger_unplug;
+	atomic_inc(&mddev->plug_cnt);
+	list_add(&mdcb->cb.list, &plug->cb_list);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -494,6 +549,7 @@ void mddev_init(mddev_t *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
+	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
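
A minimal sketch of how a personality's make_request path might consume this
helper; example_make_request() and queue_pending_write() are hypothetical names
used only for illustration and are not part of this patch:

/* Sketch only: defer the md-thread wakeup while the submitting task
 * holds a blk_plug, relying on plugger_unplug() to issue the wakeup
 * at unplug time.
 */
static int example_make_request(mddev_t *mddev, struct bio *bio)
{
	/* Register (or refresh) our callback on the current task's plug
	 * list; returns 0 if no plug is active or the allocation failed,
	 * i.e. no unplug wakeup will arrive.
	 */
	int plugged = mddev_check_plugged(mddev);

	queue_pending_write(mddev, bio);	/* hypothetical deferral helper */

	/* No unplug callback registered, so kick the md thread now. */
	if (!plugged)
		md_wakeup_thread(mddev->thread);
	return 0;
}

The md thread side can likewise consult atomic_read(&mddev->plug_cnt) and
postpone handling queued work while it is non-zero, which is what "so other
code can see if a plug is active" in the comment above refers to.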