@@ -160,12 +160,12 @@ unsigned int bvec_nr_vecs(unsigned short idx)
 	return bvec_slabs[idx].nr_vecs;
 }
 
-void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 {
 	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
 
 	if (idx == BIOVEC_MAX_IDX)
-		mempool_free(bv, bs->bvec_pool);
+		mempool_free(bv, pool);
 	else {
 		struct biovec_slab *bvs = bvec_slabs + idx;
 
@@ -173,8 +173,8 @@ void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
 	}
 }
 
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
-			      struct bio_set *bs)
+struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
+			   mempool_t *pool)
 {
 	struct bio_vec *bvl;
 
@@ -210,7 +210,7 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 	 */
 	if (*idx == BIOVEC_MAX_IDX) {
 fallback:
-		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+		bvl = mempool_alloc(pool, gfp_mask);
 	} else {
 		struct biovec_slab *bvs = bvec_slabs + *idx;
 		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
@@ -252,8 +252,8 @@ static void bio_free(struct bio *bio)
 	__bio_free(bio);
 
 	if (bs) {
-		if (bio_has_allocated_vec(bio))
-			bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+		if (bio_flagged(bio, BIO_OWNS_VEC))
+			bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 		/*
 		 * If we have front padding, adjust the bio pointer before freeing
@@ -297,6 +297,54 @@ void bio_reset(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_alloc_rescue(struct work_struct *work)
+{
+	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
+	struct bio *bio;
+
+	while (1) {
+		spin_lock(&bs->rescue_lock);
+		bio = bio_list_pop(&bs->rescue_list);
+		spin_unlock(&bs->rescue_lock);
+
+		if (!bio)
+			break;
+
+		generic_make_request(bio);
+	}
+}
+
+static void punt_bios_to_rescuer(struct bio_set *bs)
+{
+	struct bio_list punt, nopunt;
+	struct bio *bio;
+
+	/*
+	 * In order to guarantee forward progress we must punt only bios that
+	 * were allocated from this bio_set; otherwise, if there was a bio on
+	 * there for a stacking driver higher up in the stack, processing it
+	 * could require allocating bios from this bio_set, and doing that from
+	 * our own rescuer would be bad.
+	 *
+	 * Since bio lists are singly linked, pop them all instead of trying to
+	 * remove from the middle of the list:
+	 */
+
+	bio_list_init(&punt);
+	bio_list_init(&nopunt);
+
+	while ((bio = bio_list_pop(current->bio_list)))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+
+	*current->bio_list = nopunt;
+
+	spin_lock(&bs->rescue_lock);
+	bio_list_merge(&bs->rescue_list, &punt);
+	spin_unlock(&bs->rescue_lock);
+
+	queue_work(bs->rescue_workqueue, &bs->rescue_work);
+}
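
The case the rescuer guards against is easiest to see from a stacking driver's make_request function that generates several bios from one bio_set before returning. The sketch below is purely illustrative: struct my_mirror_dev, its fields and the missing completion handling are assumptions, while bio_clone_bioset(), generic_make_request() and current->bio_list behave as described in the comment above.

        #include <linux/bio.h>
        #include <linux/blkdev.h>

        struct my_mirror_dev {                          /* hypothetical driver state */
                int                     nr_mirrors;
                struct block_device     **mirror_bdev;
                struct bio_set          *bio_set;
        };

        static void my_mirror_make_request(struct request_queue *q, struct bio *bio)
        {
                struct my_mirror_dev *dev = q->queuedata;
                int i;

                for (i = 0; i < dev->nr_mirrors; i++) {
                        /* Every clone comes from the same bio_set, before any
                         * of them has been processed: */
                        struct bio *clone = bio_clone_bioset(bio, GFP_NOIO,
                                                             dev->bio_set);

                        clone->bi_bdev = dev->mirror_bdev[i];

                        /*
                         * Only queued on current->bio_list here; nothing is
                         * freed back to the mempool until this function
                         * returns, which is why bio_alloc_bioset() may have
                         * to punt to the rescuer.  (Completion handling for
                         * the original bio is omitted from this sketch.)
                         */
                        generic_make_request(clone);
                }
        }
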
+
 /**
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask: the GFP_ mask given to the slab allocator
@@ -314,11 +362,27 @@ EXPORT_SYMBOL(bio_reset);
  * previously allocated bio for IO before attempting to allocate a new one.
  * Failure to do so can cause deadlocks under memory pressure.
  *
+ * Note that when running under generic_make_request() (i.e. any block
+ * driver), bios are not submitted until after you return - see the code in
+ * generic_make_request() that converts recursion into iteration, to prevent
+ * stack overflows.
+ *
+ * This would normally mean allocating multiple bios under
+ * generic_make_request() would be susceptible to deadlocks, but we have
+ * deadlock avoidance code that resubmits any blocked bios from a rescuer
+ * thread.
+ *
+ * However, we do not guarantee forward progress for allocations from other
+ * mempools. Doing multiple allocations from the same mempool under
+ * generic_make_request() should be avoided - instead, use bio_set's front_pad
+ * for per bio allocations.
+ *
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
  */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	gfp_t saved_gfp = gfp_mask;
 	unsigned front_pad;
 	unsigned inline_vecs;
 	unsigned long idx = BIO_POOL_NONE;
@@ -336,7 +400,37 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		front_pad = 0;
 		inline_vecs = nr_iovecs;
 	} else {
+		/*
+		 * generic_make_request() converts recursion to iteration; this
+		 * means if we're running beneath it, any bios we allocate and
+		 * submit will not be submitted (and thus freed) until after we
+		 * return.
+		 *
+		 * This exposes us to a potential deadlock if we allocate
+		 * multiple bios from the same bio_set() while running
+		 * underneath generic_make_request(). If we were to allocate
+		 * multiple bios (say a stacking block driver that was splitting
+		 * bios), we would deadlock if we exhausted the mempool's
+		 * reserve.
+		 *
+		 * We solve this, and guarantee forward progress, with a rescuer
+		 * workqueue per bio_set. If we go to allocate and there are
+		 * bios on current->bio_list, we first try the allocation
+		 * without __GFP_WAIT; if that fails, we punt those bios we
+		 * would be blocking to the rescuer workqueue before we retry
+		 * with the original gfp_flags.
+		 */
+
+		if (current->bio_list && !bio_list_empty(current->bio_list))
+			gfp_mask &= ~__GFP_WAIT;
+
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
+		if (!p && gfp_mask != saved_gfp) {
+			punt_bios_to_rescuer(bs);
+			gfp_mask = saved_gfp;
+			p = mempool_alloc(bs->bio_pool, gfp_mask);
+		}
+
 		front_pad = bs->front_pad;
 		inline_vecs = BIO_INLINE_VECS;
 	}
@@ -348,9 +442,17 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	bio_init(bio);
 
 	if (nr_iovecs > inline_vecs) {
-		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+		if (!bvl && gfp_mask != saved_gfp) {
+			punt_bios_to_rescuer(bs);
+			gfp_mask = saved_gfp;
+			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+		}
+
 		if (unlikely(!bvl))
 			goto err_free;
+
+		bio->bi_flags |= 1 << BIO_OWNS_VEC;
 	} else if (nr_iovecs) {
 		bvl = bio->bi_inline_vecs;
 	}
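
The docstring above recommends a bio_set's front_pad over a second per-bio mempool allocation. A minimal sketch of that pattern, assuming a hypothetical struct my_io and my_bio_set created with front_pad = offsetof(struct my_io, bio); the container_of() trick is the usual one, not something introduced by this patch:

        #include <linux/bio.h>
        #include <linux/stddef.h>

        struct my_io {                          /* hypothetical per-bio state */
                void            *private;
                struct bio      bio;            /* must be the last member */
        };

        /* Assumed to have been set up with:
         *      my_bio_set = bioset_create(4, offsetof(struct my_io, bio)); */
        static struct bio_set *my_bio_set;

        static struct bio *my_alloc_bio(void *private, int nr_vecs)
        {
                struct bio *bio;
                struct my_io *io;

                /* One allocation covers both the bio and the driver's state: */
                bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);
                if (!bio)
                        return NULL;

                io = container_of(bio, struct my_io, bio);
                io->private = private;
                return bio;
        }
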
@@ -652,6 +754,181 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 }
 EXPORT_SYMBOL(bio_add_page);
 
+struct submit_bio_ret {
+	struct completion event;
+	int error;
+};
+
+static void submit_bio_wait_endio(struct bio *bio, int error)
+{
+	struct submit_bio_ret *ret = bio->bi_private;
+
+	ret->error = error;
+	complete(&ret->event);
+}
+
+/**
+ * submit_bio_wait - submit a bio, and wait until it completes
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
+ * bio_endio() on failure.
+ */
+int submit_bio_wait(int rw, struct bio *bio)
+{
+	struct submit_bio_ret ret;
+
+	rw |= REQ_SYNC;
+	init_completion(&ret.event);
+	bio->bi_private = &ret;
+	bio->bi_end_io = submit_bio_wait_endio;
+	submit_bio(rw, bio);
+	wait_for_completion(&ret.event);
+
+	return ret.error;
+}
+EXPORT_SYMBOL(submit_bio_wait);
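
For reference, a typical caller of the new helper might look like the sketch below. The wrapper function, its arguments and the choice of GFP_KERNEL are assumptions; only bio_alloc(), bio_add_page(), submit_bio_wait() and bio_put() are existing/new API:

        #include <linux/bio.h>
        #include <linux/blkdev.h>

        static int read_page_sync(struct block_device *bdev, sector_t sector,
                                  struct page *page)
        {
                struct bio *bio = bio_alloc(GFP_KERNEL, 1);
                int ret;

                if (!bio)
                        return -ENOMEM;

                bio->bi_bdev = bdev;
                bio->bi_sector = sector;
                bio_add_page(bio, page, PAGE_SIZE, 0);

                ret = submit_bio_wait(READ, bio);       /* sleeps until bio_endio() */
                bio_put(bio);
                return ret;
        }
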
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @bytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+void bio_advance(struct bio *bio, unsigned bytes)
+{
+	if (bio_integrity(bio))
+		bio_integrity_advance(bio, bytes);
+
+	bio->bi_sector += bytes >> 9;
+	bio->bi_size -= bytes;
+
+	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+		return;
+
+	while (bytes) {
+		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+			WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
+				  bio->bi_idx, bio->bi_vcnt);
+			break;
+		}
+
+		if (bytes >= bio_iovec(bio)->bv_len) {
+			bytes -= bio_iovec(bio)->bv_len;
+			bio->bi_idx++;
+		} else {
+			bio_iovec(bio)->bv_len -= bytes;
+			bio_iovec(bio)->bv_offset += bytes;
+			bytes = 0;
+		}
+	}
+}
+EXPORT_SYMBOL(bio_advance);
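
The intended use is partial completion: consume the bytes that have finished and let the bio describe only what remains. A hedged sketch under those assumptions; my_resubmit() and the handler itself are hypothetical, while bio_advance() and bio_endio() are real:

        #include <linux/bio.h>

        static void my_resubmit(struct bio *bio);       /* hypothetical helper */

        static void my_handle_partial_completion(struct bio *bio, unsigned done)
        {
                bio_advance(bio, done);         /* drop the completed prefix */

                if (bio->bi_size)               /* bytes left: issue the remainder */
                        my_resubmit(bio);
                else
                        bio_endio(bio, 0);      /* everything completed */
        }
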
+
+/**
+ * bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+	int i;
+	struct bio_vec *bv;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		bv->bv_page = alloc_page(gfp_mask);
+		if (!bv->bv_page) {
+			while (--bv >= bio->bi_io_vec)
+				__free_page(bv->bv_page);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bio_alloc_pages);
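
Note that bio_alloc_pages() only fills in bv_page; the caller is expected to have set bi_vcnt and each bvec's length first. A sketch of that usage, with the helper name, nr_pages and the page-sized segments being assumptions:

        #include <linux/bio.h>

        static struct bio *alloc_buffer_bio(unsigned nr_pages, gfp_t gfp)
        {
                struct bio_vec *bv;
                struct bio *bio;
                int i;

                bio = bio_alloc(gfp, nr_pages);
                if (!bio)
                        return NULL;

                bio->bi_vcnt = nr_pages;
                bio->bi_size = nr_pages << PAGE_SHIFT;

                bio_for_each_segment_all(bv, bio, i) {
                        bv->bv_len    = PAGE_SIZE;
                        bv->bv_offset = 0;
                }

                if (bio_alloc_pages(bio, gfp)) {        /* sets bv_page per bvec */
                        bio_put(bio);
                        return NULL;
                }

                return bio;
        }
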
+
+/**
+ * bio_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @src: source bio list
+ * @dst: destination bio list
+ *
+ * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
+ * @src and @dst as linked lists of bios.
+ *
+ * Stops when it reaches the end of either @src or @dst - that is, copies
+ * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
+ */
+void bio_copy_data(struct bio *dst, struct bio *src)
+{
+	struct bio_vec *src_bv, *dst_bv;
+	unsigned src_offset, dst_offset, bytes;
+	void *src_p, *dst_p;
+
+	src_bv = bio_iovec(src);
+	dst_bv = bio_iovec(dst);
+
+	src_offset = src_bv->bv_offset;
+	dst_offset = dst_bv->bv_offset;
+
+	while (1) {
+		if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
+			src_bv++;
+			if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
+				src = src->bi_next;
+				if (!src)
+					break;
+
+				src_bv = bio_iovec(src);
+			}
+
+			src_offset = src_bv->bv_offset;
+		}
+
+		if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
+			dst_bv++;
+			if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
+				dst = dst->bi_next;
+				if (!dst)
+					break;
+
+				dst_bv = bio_iovec(dst);
+			}
+
+			dst_offset = dst_bv->bv_offset;
+		}
+
+		bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
+			    src_bv->bv_offset + src_bv->bv_len - src_offset);
+
+		src_p = kmap_atomic(src_bv->bv_page);
+		dst_p = kmap_atomic(dst_bv->bv_page);
+
+		memcpy(dst_p + dst_offset,
+		       src_p + src_offset,
+		       bytes);
+
+		kunmap_atomic(dst_p);
+		kunmap_atomic(src_p);
+
+		src_offset += bytes;
+		dst_offset += bytes;
+	}
+}
+EXPORT_SYMBOL(bio_copy_data);
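
Combined with bio_alloc_pages(), this gives a simple bounce-buffer idiom: build a bio with its own pages matching the source's geometry and copy the payload across. Sketch only; alloc_buffer_bio() is the illustrative helper shown earlier, the function name is an assumption, and src->bi_size is assumed to be page-aligned. Submission and completion of the bounce bio are left to the caller.

        #include <linux/bio.h>

        static struct bio *bounce_write_bio(struct bio *src, gfp_t gfp)
        {
                /* One fresh page per page of payload in @src: */
                struct bio *dst = alloc_buffer_bio(src->bi_size >> PAGE_SHIFT, gfp);

                if (!dst)
                        return NULL;

                dst->bi_bdev   = src->bi_bdev;
                dst->bi_sector = src->bi_sector;

                bio_copy_data(dst, src);        /* copies min(src, dst) bytes */
                return dst;
        }
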
+
 struct bio_map_data {
 	struct bio_vec *iovecs;
 	struct sg_iovec *sgvecs;
@@ -714,7 +991,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 	int iov_idx = 0;
 	unsigned int iov_off = 0;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
 		unsigned int bv_len = iovecs[i].bv_len;
 
@@ -896,7 +1173,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	return bio;
 cleanup:
 	if (!map_data)
-		bio_for_each_segment(bvec, bio, i)
+		bio_for_each_segment_all(bvec, bio, i)
 			__free_page(bvec->bv_page);
 
 	bio_put(bio);
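
The conversions in this and the following hunks rely on the distinction introduced earlier in this series: bio_for_each_segment() starts at bi_idx, so it only covers the not-yet-completed part of a bio, while bio_for_each_segment_all() is for the bio's owner and always walks every allocated bvec. An illustrative (non-patch) pair of helpers, assuming those semantics:

        #include <linux/bio.h>

        static unsigned sum_remaining_bytes(struct bio *bio)
        {
                struct bio_vec *bv;
                unsigned bytes = 0;
                int i;

                bio_for_each_segment(bv, bio, i)        /* from bio->bi_idx onwards */
                        bytes += bv->bv_len;

                return bytes;           /* roughly bio->bi_size for data bios */
        }

        static unsigned sum_all_bytes(struct bio *bio)
        {
                struct bio_vec *bv;
                unsigned bytes = 0;
                int i;

                bio_for_each_segment_all(bv, bio, i)    /* always from bvec 0 */
                        bytes += bv->bv_len;

                return bytes;
        }
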
@@ -1110,7 +1387,7 @@ static void __bio_unmap_user(struct bio *bio)
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
 
@@ -1216,7 +1493,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 	int i;
 	char *p = bmd->sgvecs[0].iov_base;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
 		int len = bmd->iovecs[i].bv_len;
 
@@ -1256,7 +1533,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	if (!reading) {
 		void *p = data;
 
-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment_all(bvec, bio, i) {
 			char *addr = page_address(bvec->bv_page);
 
 			memcpy(addr, p, bvec->bv_len);
@@ -1301,11 +1578,11 @@ EXPORT_SYMBOL(bio_copy_kern);
  */
 void bio_set_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page && !PageCompound(page))
 			set_page_dirty_lock(page);
@@ -1314,11 +1591,11 @@ void bio_set_pages_dirty(struct bio *bio)
 
 static void bio_release_pages(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page)
 			put_page(page);
@@ -1367,16 +1644,16 @@ static void bio_dirty_fn(struct work_struct *work)
 
 void bio_check_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int nr_clean_pages = 0;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (PageDirty(page) || PageCompound(page)) {
 			page_cache_release(page);
-			bvec[i].bv_page = NULL;
+			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
 		}
@@ -1479,8 +1756,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
 				bi->bi_sector + first_sectors);
 
-	BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0);
-	BUG_ON(bi->bi_idx != 0);
+	BUG_ON(bio_segments(bi) > 1);
 	atomic_set(&bp->cnt, 3);
 	bp->error = 0;
 	bp->bio1 = *bi;
@@ -1490,8 +1766,8 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	bp->bio1.bi_size = first_sectors << 9;
 
 	if (bi->bi_vcnt != 0) {
-		bp->bv1 = bi->bi_io_vec[0];
-		bp->bv2 = bi->bi_io_vec[0];
+		bp->bv1 = *bio_iovec(bi);
+		bp->bv2 = *bio_iovec(bi);
 
 		if (bio_is_rw(bi)) {
 			bp->bv2.bv_offset += first_sectors << 9;
@@ -1543,7 +1819,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
 	if (index >= bio->bi_idx)
 		index = bio->bi_vcnt - 1;
 
-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		if (i == index) {
 			if (offset > bv->bv_offset)
 				sectors += (offset - bv->bv_offset) / sector_sz;
@@ -1561,29 +1837,25 @@ EXPORT_SYMBOL(bio_sector_offset);
|
|
|
* create memory pools for biovec's in a bio_set.
|
|
|
* use the global biovec slabs created for general use.
|
|
|
*/
|
|
|
-static int biovec_create_pools(struct bio_set *bs, int pool_entries)
|
|
|
+mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
|
|
|
{
|
|
|
struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
|
|
|
|
|
|
- bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
|
|
|
- if (!bs->bvec_pool)
|
|
|
- return -ENOMEM;
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static void biovec_free_pools(struct bio_set *bs)
|
|
|
-{
|
|
|
- mempool_destroy(bs->bvec_pool);
|
|
|
+ return mempool_create_slab_pool(pool_entries, bp->slab);
|
|
|
}
|
|
|
|
|
|
void bioset_free(struct bio_set *bs)
|
|
|
{
|
|
|
+ if (bs->rescue_workqueue)
|
|
|
+ destroy_workqueue(bs->rescue_workqueue);
|
|
|
+
|
|
|
if (bs->bio_pool)
|
|
|
mempool_destroy(bs->bio_pool);
|
|
|
|
|
|
+ if (bs->bvec_pool)
|
|
|
+ mempool_destroy(bs->bvec_pool);
|
|
|
+
|
|
|
bioset_integrity_free(bs);
|
|
|
- biovec_free_pools(bs);
|
|
|
bio_put_slab(bs);
|
|
|
|
|
|
kfree(bs);
|
|
@@ -1614,6 +1886,10 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 
 	bs->front_pad = front_pad;
 
+	spin_lock_init(&bs->rescue_lock);
+	bio_list_init(&bs->rescue_list);
+	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
+
 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
 	if (!bs->bio_slab) {
 		kfree(bs);
@@ -1624,9 +1900,15 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, pool_size))
-		return bs;
+	bs->bvec_pool = biovec_create_pool(bs, pool_size);
+	if (!bs->bvec_pool)
+		goto bad;
+
+	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
+	if (!bs->rescue_workqueue)
+		goto bad;
 
+	return bs;
 bad:
 	bioset_free(bs);
 	return NULL;