@@ -31,6 +31,8 @@
 #include "drbd_req.h"
 
 
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+
 /* Update disk stats at start of I/O request */
 static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
 {
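
The forward declaration is needed because __req_mod(), earlier in the file, now
jumps to a read-retry path that calls drbd_may_do_local_read(), which is defined
further down in drbd_req.c. For orientation, the helper decides whether the
local backing device may serve a read at all. A simplified sketch of its logic
(not the verbatim implementation; the bitmap helpers are named as in DRBD, but
details vary by version):

	/* A local read is fine if our disk is fully up to date, or if it is
	 * inconsistent but no out-of-sync bit is set for the requested range. */
	static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
	{
		unsigned long sbnr, ebnr;
		sector_t esector = sector + (size >> 9) - 1;

		if (mdev->state.disk == D_UP_TO_DATE)
			return true;
		if (mdev->state.disk != D_INCONSISTENT)
			return false;

		sbnr = BM_SECT_TO_BIT(sector);
		ebnr = BM_SECT_TO_BIT(esector);
		return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
	}
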
@@ -558,20 +560,21 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* assert something? */
 		if (req->rq_state & RQ_NET_PENDING)
 			dec_ap_pending(mdev);
+
+		p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;
+
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_DONE;
 		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
 			atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
 
-		if (!(req->rq_state & RQ_WRITE) &&
-		    mdev->state.disk == D_UP_TO_DATE &&
-		    !IS_ERR_OR_NULL(req->private_bio))
-			goto goto_read_retry_local;
-
 		/* if it is still queued, we may not complete it here.
 		 * it will be canceled soon. */
-		if (!(req->rq_state & RQ_NET_QUEUED))
+		if (!(req->rq_state & RQ_NET_QUEUED)) {
+			if (p)
+				goto goto_read_retry_local;
 			_req_may_be_done(req, m); /* Allowed while state.susp */
+		}
 		break;
 
 	case WRITE_ACKED_BY_PEER_AND_SIS:
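
The point of the new flag: whether this request is a read whose network answer
was still pending has to be sampled before RQ_NET_PENDING is cleared two lines
later, and the local retry may only be taken once the request is known to no
longer be queued; a request still carrying RQ_NET_QUEUED will be canceled soon
and must be neither completed nor resubmitted here. (The declaration of p is
presumably in a hunk of this patch not shown above.) Note that & binds tighter
than &&, so the right-hand side needs no extra parentheses. The ordering,
restated in isolation with a hypothetical variable name:

	bool retry_read = !(req->rq_state & RQ_WRITE) &&	/* a read ...    */
			  (req->rq_state & RQ_NET_PENDING);	/* ... unanswered */

	req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);		/* sample first, clear second */

	if (!(req->rq_state & RQ_NET_QUEUED) && retry_read)
		goto goto_read_retry_local;			/* only once no longer queued */
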
@@ -631,9 +634,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		req->rq_state |= RQ_NET_DONE;
 
-		if (!(req->rq_state & RQ_WRITE) &&
-		    mdev->state.disk == D_UP_TO_DATE &&
-		    !IS_ERR_OR_NULL(req->private_bio))
+		if (!(req->rq_state & RQ_WRITE))
 			goto goto_read_retry_local;
 
 		_req_may_be_done_not_susp(req, m);
@@ -641,9 +642,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	goto_read_retry_local:
+		if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
+			_req_may_be_done_not_susp(req, m);
+			break;
+		}
+		D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
 		req->rq_state |= RQ_LOCAL_PENDING;
-		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
-		generic_make_request(req->private_bio);
+
+		get_ldev(mdev);
+		req->w.cb = w_restart_disk_io;
+		drbd_queue_work(&mdev->tconn->data.work, &req->w);
 		break;
 
 	case FAIL_FROZEN_DISK_IO:
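
Both read paths above now funnel into goto_read_retry_local, which performs the
safety check itself (hence the callers could drop their D_UP_TO_DATE and
private_bio guards), asserts that no local I/O is in flight, and hands the
request to the DRBD worker instead of calling generic_make_request() directly
from this context. The get_ldev() reference taken before queueing is presumably
released again on the normal local-completion path. Since requests no longer
park a private_bio for a possible retry, a later hunk can delete the
corresponding cleanup. For reference, the queued callback re-clones the bio
from the untouched master bio and resubmits it locally; a sketch modeled on
w_restart_disk_io() in drbd_worker.c (simplified; signatures differ between
DRBD versions):

	int w_restart_disk_io(struct drbd_work *w, int cancel)
	{
		struct drbd_request *req = container_of(w, struct drbd_request, w);
		struct drbd_conf *mdev = w->mdev;

		/* rebuild the private bio from the unchanged master bio,
		 * point it at the local backing device, and resubmit */
		drbd_req_make_private_bio(req, req->master_bio);
		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
		generic_make_request(req->private_bio);

		return 1;
	}
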
@@ -706,11 +714,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		dec_ap_pending(mdev);
 		req->rq_state &= ~RQ_NET_PENDING;
 		req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
-		if (!IS_ERR_OR_NULL(req->private_bio)) {
-			bio_put(req->private_bio);
-			req->private_bio = NULL;
-			put_ldev(mdev);
-		}
 		_req_may_be_done_not_susp(req, m);
 		break;
 	};
@@ -840,7 +843,8 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
 	} else {
 		/* READ || READA */
 		if (local) {
-			if (!drbd_may_do_local_read(mdev, sector, size)) {
+			if (!drbd_may_do_local_read(mdev, sector, size) ||
+			    remote_due_to_read_balancing(mdev, sector)) {
 				/* we could kick the syncer to
 				 * sync this extent asap, wait for
 				 * it, then continue locally.
@@ -850,10 +854,6 @@ int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long s
 				bio_put(req->private_bio);
 				req->private_bio = NULL;
 				put_ldev(mdev);
-			} else if (remote_due_to_read_balancing(mdev, sector)) {
-				/* Keep the private bio in case we need it
-				   for a local retry */
-				local = 0;
-			}
 		}
 	}
 	remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
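
With this hunk the balancing decision moves into the same branch as "local read
not possible": if the configured policy elects the peer, the private bio and
the ldev reference are dropped immediately rather than kept around "in case we
need it for a local retry", since the retry path above can now rebuild the bio
from the master bio. remote_due_to_read_balancing() consults the disk's
read-balancing policy; a simplified sketch (the real function handles more
policies, such as striping by sector and least-pending; enum and flag names as
in DRBD 8.4, details vary by version):

	static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector)
	{
		if (mdev->state.pdsk < D_UP_TO_DATE)
			return false;	/* the peer cannot serve reads anyway */

		/* caller is expected to hold rcu_read_lock() */
		switch (rcu_dereference(mdev->ldev->disk_conf)->read_balancing) {
		case RB_PREFER_REMOTE:
			return true;
		case RB_ROUND_ROBIN:
			/* alternate between local and remote reads */
			return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
		case RB_PREFER_LOCAL:
		default:
			return false;
		}
	}
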
@@ -1081,7 +1081,7 @@ fail_free_complete:
 	if (req->rq_state & RQ_IN_ACT_LOG)
 		drbd_al_complete_io(mdev, &req->i);
 fail_and_free_req:
-	if (!IS_ERR_OR_NULL(req->private_bio)) {
+	if (local) {
 		bio_put(req->private_bio);
 		req->private_bio = NULL;
 		put_ldev(mdev);
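
Finally, the failure path can test the local flag instead of probing the
pointer: with the "parked" private_bio gone, private_bio is non-NULL on this
path exactly when local is set, so the simpler test expresses the same
condition directly. Stated as a hypothetical assertion (not part of the patch):

	/* invariant on the failure path after this patch */
	D_ASSERT(local == (req->private_bio != NULL));
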
|