@@ -189,7 +189,7 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct list_head *le, *tle;
 
 	/* The EEs are always appended to the end of the list. Since
@@ -198,8 +198,8 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
 	   stop to examine the list... */
 
 	list_for_each_safe(le, tle, &mdev->net_ee) {
-		e = list_entry(le, struct drbd_peer_request, w.list);
-		if (drbd_ee_has_active_page(e))
+		peer_req = list_entry(le, struct drbd_peer_request, w.list);
+		if (drbd_ee_has_active_page(peer_req))
 			break;
 		list_move(le, to_be_freed);
 	}
@@ -208,14 +208,14 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 {
 	LIST_HEAD(reclaimed);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 
 	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_net_ee(mdev, e);
+	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+		drbd_free_net_ee(mdev, peer_req);
 }
 
 /**
@@ -313,15 +313,15 @@ struct drbd_peer_request *
 drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
 	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct page *page;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
 		return NULL;
 
-	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
-	if (!e) {
+	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+	if (!peer_req) {
 		if (!(gfp_mask & __GFP_NOWARN))
 			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
 		return NULL;
@@ -331,45 +331,45 @@ drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
 	if (!page)
 		goto fail;
 
-	drbd_clear_interval(&e->i);
-	e->i.size = data_size;
-	e->i.sector = sector;
-	e->i.local = false;
-	e->i.waiting = false;
-
-	e->epoch = NULL;
-	e->mdev = mdev;
-	e->pages = page;
-	atomic_set(&e->pending_bios, 0);
-	e->flags = 0;
+	drbd_clear_interval(&peer_req->i);
+	peer_req->i.size = data_size;
+	peer_req->i.sector = sector;
+	peer_req->i.local = false;
+	peer_req->i.waiting = false;
+
+	peer_req->epoch = NULL;
+	peer_req->mdev = mdev;
+	peer_req->pages = page;
+	atomic_set(&peer_req->pending_bios, 0);
+	peer_req->flags = 0;
 	/*
 	 * The block_id is opaque to the receiver. It is not endianness
 	 * converted, and sent back to the sender unchanged.
 	 */
-	e->block_id = id;
+	peer_req->block_id = id;
 
-	return e;
+	return peer_req;
 
 fail:
-	mempool_free(e, drbd_ee_mempool);
+	mempool_free(peer_req, drbd_ee_mempool);
 	return NULL;
 }
 
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
 		       int is_net)
 {
-	if (e->flags & EE_HAS_DIGEST)
-		kfree(e->digest);
-	drbd_pp_free(mdev, e->pages, is_net);
-	D_ASSERT(atomic_read(&e->pending_bios) == 0);
-	D_ASSERT(drbd_interval_empty(&e->i));
-	mempool_free(e, drbd_ee_mempool);
+	if (peer_req->flags & EE_HAS_DIGEST)
+		kfree(peer_req->digest);
+	drbd_pp_free(mdev, peer_req->pages, is_net);
+	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+	D_ASSERT(drbd_interval_empty(&peer_req->i));
+	mempool_free(peer_req, drbd_ee_mempool);
 }
 
 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 {
 	LIST_HEAD(work_list);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 	int count = 0;
 	int is_net = list == &mdev->net_ee;
 
@@ -377,8 +377,8 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	list_splice_init(list, &work_list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &work_list, w.list) {
-		drbd_free_some_ee(mdev, e, is_net);
+	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+		drbd_free_some_ee(mdev, peer_req, is_net);
 		count++;
 	}
 	return count;
@@ -398,7 +398,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 {
 	LIST_HEAD(work_list);
 	LIST_HEAD(reclaimed);
-	struct drbd_peer_request *e, *t;
+	struct drbd_peer_request *peer_req, *t;
 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
 
 	spin_lock_irq(&mdev->tconn->req_lock);
@@ -406,17 +406,17 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	list_splice_init(&mdev->done_ee, &work_list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	list_for_each_entry_safe(e, t, &reclaimed, w.list)
-		drbd_free_net_ee(mdev, e);
+	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+		drbd_free_net_ee(mdev, peer_req);
 
 	/* possible callbacks here:
 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
 	 * all ignore the last argument.
 	 */
-	list_for_each_entry_safe(e, t, &work_list, w.list) {
+	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
 		/* list_del not necessary, next/prev members not touched */
-		ok = e->w.cb(mdev, &e->w, !ok) && ok;
-		drbd_free_ee(mdev, e);
+		ok = peer_req->w.cb(mdev, &peer_req->w, !ok) && ok;
+		drbd_free_ee(mdev, peer_req);
 	}
 	wake_up(&mdev->ee_wait);
 
@@ -1085,7 +1085,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
 /**
  * drbd_submit_ee()
  * @mdev:	DRBD device.
- * @e:		peer request
+ * @peer_req:	peer request
  * @rw:	flag field, see bio->bi_rw
  *
  * May spread the pages to multiple bios,
@@ -1099,14 +1099,14 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
  *  on certain Xen deployments.
  */
 /* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
 		   const unsigned rw, const int fault_type)
 {
 	struct bio *bios = NULL;
 	struct bio *bio;
-	struct page *page = e->pages;
-	sector_t sector = e->i.sector;
-	unsigned ds = e->i.size;
+	struct page *page = peer_req->pages;
+	sector_t sector = peer_req->i.sector;
+	unsigned ds = peer_req->i.size;
 	unsigned n_bios = 0;
 	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
 	int err = -ENOMEM;
@@ -1121,11 +1121,11 @@ next_bio:
 		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
 		goto fail;
 	}
-	/* > e->i.sector, unless this is the first bio */
+	/* > peer_req->i.sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
-	bio->bi_private = e;
+	bio->bi_private = peer_req;
 	bio->bi_end_io = drbd_endio_sec;
 
 	bio->bi_next = bios;
@@ -1155,7 +1155,7 @@ next_bio:
 	D_ASSERT(page == NULL);
 	D_ASSERT(ds == 0);
 
-	atomic_set(&e->pending_bios, n_bios);
+	atomic_set(&peer_req->pending_bios, n_bios);
 	do {
 		bio = bios;
 		bios = bios->bi_next;
@@ -1175,9 +1175,9 @@ fail:
 }
 
 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
-					     struct drbd_peer_request *e)
+					     struct drbd_peer_request *peer_req)
 {
-	struct drbd_interval *i = &e->i;
+	struct drbd_interval *i = &peer_req->i;
 
 	drbd_remove_interval(&mdev->write_requests, i);
 	drbd_clear_interval(i);
@@ -1266,7 +1266,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	      int data_size) __must_hold(local)
 {
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct page *page;
 	int dgs, ds, rr;
 	void *dig_in = mdev->tconn->int_dig_in;
@@ -1309,12 +1309,12 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place. */
-	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
-	if (!e)
+	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
+	if (!peer_req)
 		return NULL;
 
 	ds = data_size;
-	page = e->pages;
+	page = peer_req->pages;
 	page_chain_for_each(page) {
 		unsigned len = min_t(int, ds, PAGE_SIZE);
 		data = kmap(page);
@@ -1325,7 +1325,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 		}
 		kunmap(page);
 		if (rr != len) {
-			drbd_free_ee(mdev, e);
+			drbd_free_ee(mdev, peer_req);
 			if (!signal_pending(current))
 				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
 					rr, len);
@@ -1335,18 +1335,18 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
 	}
 
 	if (dgs) {
-		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, e, dig_vv);
+		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
 		if (memcmp(dig_in, dig_vv, dgs)) {
 			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
 				(unsigned long long)sector, data_size);
 			drbd_bcast_ee(mdev, "digest failed",
-					dgs, dig_in, dig_vv, e);
-			drbd_free_ee(mdev, e);
+					dgs, dig_in, dig_vv, peer_req);
+			drbd_free_ee(mdev, peer_req);
 			return NULL;
 		}
 	}
 	mdev->recv_cnt += data_size>>9;
-	return e;
+	return peer_req;
 }
 
 /* drbd_drain_block() just takes a data block
@@ -1445,20 +1445,20 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
  * drbd_process_done_ee() by asender only */
 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
-	sector_t sector = e->i.sector;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
+	sector_t sector = peer_req->i.sector;
 	int ok;
 
-	D_ASSERT(drbd_interval_empty(&e->i));
+	D_ASSERT(drbd_interval_empty(&peer_req->i));
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
-		drbd_set_in_sync(mdev, sector, e->i.size);
-		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+		drbd_set_in_sync(mdev, sector, peer_req->i.size);
+		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
 	} else {
 		/* Record failure to sync */
-		drbd_rs_failed_io(mdev, sector, e->i.size);
+		drbd_rs_failed_io(mdev, sector, peer_req->i.size);
 
-		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+		ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
 	}
 	dec_unacked(mdev);
 
@@ -1467,10 +1467,10 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
 
 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
 {
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 
-	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
-	if (!e)
+	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+	if (!peer_req)
 		goto fail;
 
 	dec_rs_pending(mdev);
@@ -1479,23 +1479,23 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 	/* corresponding dec_unacked() in e_end_resync_block()
 	 * respective _drbd_clear_done_ee */
 
-	e->w.cb = e_end_resync_block;
+	peer_req->w.cb = e_end_resync_block;
 
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add(&e->w.list, &mdev->sync_ee);
+	list_add(&peer_req->w.list, &mdev->sync_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+	if (drbd_submit_ee(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 fail:
 	put_ldev(mdev);
 	return false;
@@ -1582,21 +1582,21 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
  */
 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
-	sector_t sector = e->i.sector;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
+	sector_t sector = peer_req->i.sector;
 	int ok = 1, pcmd;
 
 	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
-		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
 				mdev->state.conn <= C_PAUSED_SYNC_T &&
-				e->flags & EE_MAY_SET_IN_SYNC) ?
+				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
 				P_RS_WRITE_ACK : P_WRITE_ACK;
-			ok &= drbd_send_ack(mdev, pcmd, e);
+			ok &= drbd_send_ack(mdev, pcmd, peer_req);
 			if (pcmd == P_RS_WRITE_ACK)
-				drbd_set_in_sync(mdev, sector, e->i.size);
+				drbd_set_in_sync(mdev, sector, peer_req->i.size);
 		} else {
-			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+			ok = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
 			/* we expect it to be marked out of sync anyways...
 			 * maybe assert this? */
 		}
@@ -1606,28 +1606,28 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (mdev->tconn->net_conf->two_primaries) {
 		spin_lock_irq(&mdev->tconn->req_lock);
-		D_ASSERT(!drbd_interval_empty(&e->i));
-		drbd_remove_epoch_entry_interval(mdev, e);
+		D_ASSERT(!drbd_interval_empty(&peer_req->i));
+		drbd_remove_epoch_entry_interval(mdev, peer_req);
 		spin_unlock_irq(&mdev->tconn->req_lock);
 	} else
-		D_ASSERT(drbd_interval_empty(&e->i));
+		D_ASSERT(drbd_interval_empty(&peer_req->i));
 
-	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
 
 	return ok;
 }
 
 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
-	struct drbd_peer_request *e = (struct drbd_peer_request *)w;
+	struct drbd_peer_request *peer_req = (struct drbd_peer_request *)w;
 	int ok = 1;
 
 	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
-	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
+	ok = drbd_send_ack(mdev, P_DISCARD_ACK, peer_req);
 
 	spin_lock_irq(&mdev->tconn->req_lock);
-	D_ASSERT(!drbd_interval_empty(&e->i));
-	drbd_remove_epoch_entry_interval(mdev, e);
+	D_ASSERT(!drbd_interval_empty(&peer_req->i));
+	drbd_remove_epoch_entry_interval(mdev, peer_req);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	dec_unacked(mdev);
@@ -1731,7 +1731,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 			unsigned int data_size)
 {
 	sector_t sector;
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct p_data *p = &mdev->tconn->data.rbuf.data;
 	int rw = WRITE;
 	u32 dp_flags;
@@ -1753,24 +1753,24 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	 * the end of this function. */
 
 	sector = be64_to_cpu(p->sector);
-	e = read_in_block(mdev, p->block_id, sector, data_size);
-	if (!e) {
+	peer_req = read_in_block(mdev, p->block_id, sector, data_size);
+	if (!peer_req) {
 		put_ldev(mdev);
 		return false;
 	}
 
-	e->w.cb = e_end_block;
+	peer_req->w.cb = e_end_block;
 
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(mdev, dp_flags);
 
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
-		e->flags |= EE_MAY_SET_IN_SYNC;
+		peer_req->flags |= EE_MAY_SET_IN_SYNC;
 
 	spin_lock(&mdev->epoch_lock);
-	e->epoch = mdev->current_epoch;
-	atomic_inc(&e->epoch->epoch_size);
-	atomic_inc(&e->epoch->active);
+	peer_req->epoch = mdev->current_epoch;
+	atomic_inc(&peer_req->epoch->epoch_size);
+	atomic_inc(&peer_req->epoch->active);
 	spin_unlock(&mdev->epoch_lock);
 
 	/* I'm the receiver, I do hold a net_cnt reference. */
@@ -1779,7 +1779,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	} else {
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
-		const int size = e->i.size;
+		const int size = peer_req->i.size;
 		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
 		DEFINE_WAIT(wait);
 		int first;
@@ -1856,8 +1856,8 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 			dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
 				  (unsigned long long)sector);
 			inc_unacked(mdev);
-			e->w.cb = e_send_discard_ack;
-			list_add_tail(&e->w.list, &mdev->done_ee);
+			peer_req->w.cb = e_send_discard_ack;
+			list_add_tail(&peer_req->w.list, &mdev->done_ee);
 
 			spin_unlock_irq(&mdev->tconn->req_lock);
 
@@ -1894,10 +1894,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 		}
 		finish_wait(&mdev->misc_wait, &wait);
 
-		drbd_insert_interval(&mdev->write_requests, &e->i);
+		drbd_insert_interval(&mdev->write_requests, &peer_req->i);
 	}
 
-	list_add(&e->w.list, &mdev->active_ee);
+	list_add(&peer_req->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	switch (mdev->tconn->net_conf->wire_protocol) {
@@ -1909,7 +1909,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 	case DRBD_PROT_B:
 		/* I really don't like it that the receiver thread
 		 * sends on the msock, but anyways */
-		drbd_send_ack(mdev, P_RECV_ACK, e);
+		drbd_send_ack(mdev, P_RECV_ACK, peer_req);
 		break;
 	case DRBD_PROT_A:
 		/* nothing to do */
@@ -1918,28 +1918,28 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
 
 	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
-		drbd_set_out_of_sync(mdev, e->i.sector, e->i.size);
-		e->flags |= EE_CALL_AL_COMPLETE_IO;
-		e->flags &= ~EE_MAY_SET_IN_SYNC;
-		drbd_al_begin_io(mdev, e->i.sector);
+		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+		drbd_al_begin_io(mdev, peer_req->i.sector);
 	}
 
-	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+	if (drbd_submit_ee(mdev, peer_req, rw, DRBD_FAULT_DT_WR) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
-	drbd_remove_epoch_entry_interval(mdev, e);
+	list_del(&peer_req->w.list);
+	drbd_remove_epoch_entry_interval(mdev, peer_req);
 	spin_unlock_irq(&mdev->tconn->req_lock);
-	if (e->flags & EE_CALL_AL_COMPLETE_IO)
-		drbd_al_complete_io(mdev, e->i.sector);
+	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+		drbd_al_complete_io(mdev, peer_req->i.sector);
 
 out_interrupted:
-	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
 	put_ldev(mdev);
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	return false;
 }
 
@@ -2015,7 +2015,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 {
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	struct drbd_peer_request *e;
+	struct drbd_peer_request *peer_req;
 	struct digest_info *di = NULL;
 	int size, verb;
 	unsigned int fault_type;
@@ -2066,21 +2066,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
 	 * which in turn might block on the other node at this very place. */
-	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
-	if (!e) {
+	peer_req = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
+	if (!peer_req) {
 		put_ldev(mdev);
 		return false;
 	}
 
	switch (cmd) {
 	case P_DATA_REQUEST:
-		e->w.cb = w_e_end_data_req;
+		peer_req->w.cb = w_e_end_data_req;
 		fault_type = DRBD_FAULT_DT_RD;
 		/* application IO, don't drbd_rs_begin_io */
 		goto submit;
 
 	case P_RS_DATA_REQUEST:
-		e->w.cb = w_e_end_rsdata_req;
+		peer_req->w.cb = w_e_end_rsdata_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		/* used in the sector offset progress display */
 		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2096,21 +2096,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 		di->digest_size = digest_size;
 		di->digest = (((char *)di)+sizeof(struct digest_info));
 
-		e->digest = di;
-		e->flags |= EE_HAS_DIGEST;
+		peer_req->digest = di;
+		peer_req->flags |= EE_HAS_DIGEST;
 
 		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
 			goto out_free_e;
 
 		if (cmd == P_CSUM_RS_REQUEST) {
 			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
-			e->w.cb = w_e_end_csum_rs_req;
+			peer_req->w.cb = w_e_end_csum_rs_req;
 			/* used in the sector offset progress display */
 			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
 		} else if (cmd == P_OV_REPLY) {
 			/* track progress, we may need to throttle */
 			atomic_add(size >> 9, &mdev->rs_sect_in);
-			e->w.cb = w_e_end_ov_reply;
+			peer_req->w.cb = w_e_end_ov_reply;
 			dec_rs_pending(mdev);
 			/* drbd_rs_begin_io done when we sent this request,
 			 * but accounting still needs to be done. */
@@ -2134,7 +2134,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packet cmd,
 			dev_info(DEV, "Online Verify start sector: %llu\n",
 					(unsigned long long)sector);
 		}
-		e->w.cb = w_e_end_ov_req;
+		peer_req->w.cb = w_e_end_ov_req;
 		fault_type = DRBD_FAULT_RS_RD;
 		break;
 
@@ -2178,22 +2178,22 @@ submit_for_resync:
 submit:
 	inc_unacked(mdev);
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_add_tail(&e->w.list, &mdev->read_ee);
+	list_add_tail(&peer_req->w.list, &mdev->read_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+	if (drbd_submit_ee(mdev, peer_req, READ, fault_type) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->tconn->req_lock);
-	list_del(&e->w.list);
+	list_del(&peer_req->w.list);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
 	put_ldev(mdev);
-	drbd_free_ee(mdev, e);
+	drbd_free_ee(mdev, peer_req);
 	return false;
 }
 
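Every hunk above renames through the same two kernel idioms: an intrusive list hook (w.list) embedded in struct drbd_peer_request, and the list_for_each_entry_safe() walk, whose lookahead cursor ("t" in the code) is what makes it legal to unlink and free the current entry mid-walk. reclaim_net_ee() adds one twist: EEs complete in submission order, so the walk can stop at the first entry that still has an active page. Below is a minimal userspace sketch of that pattern, with hypothetical names and a hand-rolled stand-in for <linux/list.h> rather than the real kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hand-rolled stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Toy peer request; 'list' plays the role of w.list in the patch. */
struct peer_request {
	struct list_head list;
	int pending_bios;	/* nonzero while I/O is still in flight */
	int id;
};

/*
 * The reclaim_net_ee() shape: entries complete in submission order, so
 * walk from the front and stop at the first one still in use.  The
 * "safe" iteration caches ->next in 'tmp' before the body runs, which
 * is what allows list_del() + free() of the current entry.
 */
static void reclaim_finished(struct list_head *head)
{
	struct list_head *pos, *tmp;

	for (pos = head->next, tmp = pos->next; pos != head;
	     pos = tmp, tmp = pos->next) {
		struct peer_request *req =
			container_of(pos, struct peer_request, list);

		if (req->pending_bios)	/* first busy entry: stop */
			break;
		list_del(pos);
		printf("reclaimed request %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	struct list_head net_ee = LIST_HEAD_INIT(net_ee);
	int i;

	/* Queue four requests; request 2 still has I/O in flight. */
	for (i = 0; i < 4; i++) {
		struct peer_request *req = malloc(sizeof(*req));

		req->id = i;
		req->pending_bios = (i == 2);
		list_add_tail(&req->list, &net_ee);
	}
	reclaim_finished(&net_ee);	/* frees 0 and 1, stops at 2 */
	return 0;
}

The rename itself changes none of this behavior; it only makes explicit at every use site that these entries are peer requests, as opposed to the local struct drbd_request.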