@@ -42,21 +42,21 @@ struct convert_context {
 	unsigned int offset_out;
 	unsigned int idx_in;
 	unsigned int idx_out;
-	sector_t sector;
-	atomic_t pending;
+	sector_t cc_sector;
+	atomic_t cc_pending;
 };

 /*
  * per bio private data
  */
 struct dm_crypt_io {
-	struct dm_target *target;
+	struct crypt_config *cc;
 	struct bio *base_bio;
 	struct work_struct work;

 	struct convert_context ctx;

-	atomic_t pending;
+	atomic_t io_pending;
 	int error;
 	sector_t sector;
 	struct dm_crypt_io *base_io;
@@ -109,9 +109,6 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
  */
 struct crypt_cpu {
 	struct ablkcipher_request *req;
-	/* ESSIV: struct crypto_cipher *essiv_tfm */
-	void *iv_private;
-	struct crypto_ablkcipher *tfms[0];
 };

 /*
@@ -151,6 +148,10 @@ struct crypt_config {
 	 * per_cpu_ptr() only.
 	 */
 	struct crypt_cpu __percpu *cpu;
+
+	/* ESSIV: struct crypto_cipher *essiv_tfm */
+	void *iv_private;
+	struct crypto_ablkcipher **tfms;
 	unsigned tfms_count;

 	/*
@@ -193,7 +194,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
  */
 static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
 {
-	return __this_cpu_ptr(cc->cpu)->tfms[0];
+	return cc->tfms[0];
 }

 /*
@@ -258,7 +259,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	struct hash_desc desc;
 	struct scatterlist sg;
 	struct crypto_cipher *essiv_tfm;
-	int err, cpu;
+	int err;

 	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = essiv->hash_tfm;
@@ -268,14 +269,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	if (err)
 		return err;

-	for_each_possible_cpu(cpu) {
-		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+	essiv_tfm = cc->iv_private;

-		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-				    crypto_hash_digestsize(essiv->hash_tfm));
-		if (err)
-			return err;
-	}
+	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
+			    crypto_hash_digestsize(essiv->hash_tfm));
+	if (err)
+		return err;

 	return 0;
 }
@@ -286,16 +285,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
 	struct crypto_cipher *essiv_tfm;
-	int cpu, r, err = 0;
+	int r, err = 0;

 	memset(essiv->salt, 0, salt_size);

-	for_each_possible_cpu(cpu) {
-		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
-		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-		if (r)
-			err = r;
-	}
+	essiv_tfm = cc->iv_private;
+	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+	if (r)
+		err = r;

 	return err;
 }
@@ -335,8 +332,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,

 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	int cpu;
-	struct crypt_cpu *cpu_cc;
 	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

@@ -346,15 +341,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 	kzfree(essiv->salt);
 	essiv->salt = NULL;

-	for_each_possible_cpu(cpu) {
-		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-		essiv_tfm = cpu_cc->iv_private;
+	essiv_tfm = cc->iv_private;

-		if (essiv_tfm)
-			crypto_free_cipher(essiv_tfm);
+	if (essiv_tfm)
+		crypto_free_cipher(essiv_tfm);

-		cpu_cc->iv_private = NULL;
-	}
+	cc->iv_private = NULL;
 }

 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -363,7 +355,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
 	u8 *salt = NULL;
-	int err, cpu;
+	int err;

 	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -388,15 +380,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	cc->iv_gen_private.essiv.salt = salt;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

-	for_each_possible_cpu(cpu) {
-		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-					crypto_hash_digestsize(hash_tfm));
-		if (IS_ERR(essiv_tfm)) {
-			crypt_iv_essiv_dtr(cc);
-			return PTR_ERR(essiv_tfm);
-		}
-		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+				crypto_hash_digestsize(hash_tfm));
+	if (IS_ERR(essiv_tfm)) {
+		crypt_iv_essiv_dtr(cc);
+		return PTR_ERR(essiv_tfm);
 	}
+	cc->iv_private = essiv_tfm;

 	return 0;

@@ -410,7 +400,7 @@ bad:
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
 {
-	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+	struct crypto_cipher *essiv_tfm = cc->iv_private;

 	memset(iv, 0, cc->iv_size);
 	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
@@ -664,7 +654,7 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->offset_out = 0;
 	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
 	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
-	ctx->sector = sector + cc->iv_offset;
+	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }

@@ -695,12 +685,12 @@ static int crypt_convert_block(struct crypt_config *cc,
 	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
-	int r = 0;
+	int r;

 	dmreq = dmreq_of_req(cc, req);
 	iv = iv_of_dmreq(cc, dmreq);

-	dmreq->iv_sector = ctx->sector;
+	dmreq->iv_sector = ctx->cc_sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -749,12 +739,12 @@ static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
 {
 	struct crypt_cpu *this_cc = this_crypt_config(cc);
-	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

 	if (!this_cc->req)
 		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

-	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
 	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
@@ -769,14 +759,14 @@ static int crypt_convert(struct crypt_config *cc,
 	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;

-	atomic_set(&ctx->pending, 1);
+	atomic_set(&ctx->cc_pending, 1);

 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

 		crypt_alloc_req(cc, ctx);

-		atomic_inc(&ctx->pending);
+		atomic_inc(&ctx->cc_pending);

 		r = crypt_convert_block(cc, ctx, this_cc->req);

@@ -788,19 +778,19 @@ static int crypt_convert(struct crypt_config *cc,
 			/* fall through*/
 		case -EINPROGRESS:
 			this_cc->req = NULL;
-			ctx->sector++;
+			ctx->cc_sector++;
 			continue;

 		/* sync */
 		case 0:
-			atomic_dec(&ctx->pending);
-			ctx->sector++;
+			atomic_dec(&ctx->cc_pending);
+			ctx->cc_sector++;
 			cond_resched();
 			continue;

 		/* error */
 		default:
-			atomic_dec(&ctx->pending);
+			atomic_dec(&ctx->cc_pending);
 			return r;
 		}
 	}
@@ -811,7 +801,7 @@ static int crypt_convert(struct crypt_config *cc,
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
 	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	bio_free(bio, cc->bs);
 }
@@ -825,7 +815,7 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -884,26 +874,25 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }

-static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;

 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
+	io->cc = cc;
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
-	atomic_set(&io->pending, 0);
+	atomic_set(&io->io_pending, 0);

 	return io;
 }

 static void crypt_inc_pending(struct dm_crypt_io *io)
 {
-	atomic_inc(&io->pending);
+	atomic_inc(&io->io_pending);
 }

 /*
@@ -913,12 +902,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;

-	if (!atomic_dec_and_test(&io->pending))
+	if (!atomic_dec_and_test(&io->io_pending))
 		return;

 	mempool_free(io, cc->io_pool);
@@ -952,7 +941,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 static void crypt_endio(struct bio *clone, int error)
 {
 	struct dm_crypt_io *io = clone->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);

 	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
@@ -979,7 +968,7 @@ static void crypt_endio(struct bio *clone, int error)

 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
@@ -990,7 +979,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)

 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;

@@ -1038,7 +1027,7 @@ static void kcryptd_io(struct work_struct *work)

 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	INIT_WORK(&io->work, kcryptd_io);
 	queue_work(cc->io_queue, &io->work);
@@ -1047,7 +1036,7 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1069,7 +1058,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)

 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	struct dm_crypt_io *new_io;
 	int crypt_finished;
@@ -1107,7 +1096,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		if (r < 0)
 			io->error = -EIO;

-		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
+		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

 		/* Encryption was already finished, submit io now */
 		if (crypt_finished) {
@@ -1135,7 +1124,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		 * between fragments, so switch to a new dm_crypt_io structure.
 		 */
 		if (unlikely(!crypt_finished && remaining)) {
-			new_io = crypt_io_alloc(io->target, io->base_bio,
+			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
@@ -1169,7 +1158,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)

 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	int r = 0;

 	crypt_inc_pending(io);
@@ -1181,7 +1170,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	if (r < 0)
 		io->error = -EIO;

-	if (atomic_dec_and_test(&io->ctx.pending))
+	if (atomic_dec_and_test(&io->ctx.cc_pending))
 		kcryptd_crypt_read_done(io);

 	crypt_dec_pending(io);
@@ -1193,7 +1182,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	struct dm_crypt_request *dmreq = async_req->data;
 	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	if (error == -EINPROGRESS) {
 		complete(&ctx->restart);
@@ -1208,7 +1197,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,

 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

-	if (!atomic_dec_and_test(&ctx->pending))
+	if (!atomic_dec_and_test(&ctx->cc_pending))
 		return;

 	if (bio_data_dir(io->base_bio) == READ)
@@ -1229,7 +1218,7 @@ static void kcryptd_crypt(struct work_struct *work)

 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;

 	INIT_WORK(&io->work, kcryptd_crypt);
 	queue_work(cc->crypt_queue, &io->work);
@@ -1241,7 +1230,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 {
 	char buffer[3];
-	char *endp;
 	unsigned int i;

 	buffer[2] = '\0';
@@ -1250,9 +1238,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 		buffer[0] = *hex++;
 		buffer[1] = *hex++;

-		key[i] = (u8)simple_strtoul(buffer, &endp, 16);
-
-		if (endp != &buffer[2])
+		if (kstrtou8(buffer, 16, &key[i]))
 			return -EINVAL;
 	}

@@ -1276,29 +1262,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 	}
 }

-static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+static void crypt_free_tfms(struct crypt_config *cc)
 {
-	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
 	unsigned i;

+	if (!cc->tfms)
+		return;
+
 	for (i = 0; i < cc->tfms_count; i++)
-		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
-			crypto_free_ablkcipher(cpu_cc->tfms[i]);
-			cpu_cc->tfms[i] = NULL;
+		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
+			crypto_free_ablkcipher(cc->tfms[i]);
+			cc->tfms[i] = NULL;
 		}
+
+	kfree(cc->tfms);
+	cc->tfms = NULL;
 }

-static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 {
-	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
 	unsigned i;
 	int err;

+	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+			   GFP_KERNEL);
+	if (!cc->tfms)
+		return -ENOMEM;
+
 	for (i = 0; i < cc->tfms_count; i++) {
-		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
-		if (IS_ERR(cpu_cc->tfms[i])) {
-			err = PTR_ERR(cpu_cc->tfms[i]);
-			crypt_free_tfms(cc, cpu);
+		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		if (IS_ERR(cc->tfms[i])) {
+			err = PTR_ERR(cc->tfms[i]);
+			crypt_free_tfms(cc);
 			return err;
 		}
 	}
@@ -1309,15 +1304,14 @@ static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
 static int crypt_setkey_allcpus(struct crypt_config *cc)
 {
 	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
-	int cpu, err = 0, i, r;
-
-	for_each_possible_cpu(cpu) {
-		for (i = 0; i < cc->tfms_count; i++) {
-			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
-						     cc->key + (i * subkey_size), subkey_size);
-			if (r)
-				err = r;
-		}
+	int err = 0, i, r;
+
+	for (i = 0; i < cc->tfms_count; i++) {
+		r = crypto_ablkcipher_setkey(cc->tfms[i],
+					     cc->key + (i * subkey_size),
+					     subkey_size);
+		if (r)
+			err = r;
 	}

 	return err;
@@ -1379,9 +1373,10 @@ static void crypt_dtr(struct dm_target *ti)
 		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
 		if (cpu_cc->req)
 			mempool_free(cpu_cc->req, cc->req_pool);
-		crypt_free_tfms(cc, cpu);
 	}

+	crypt_free_tfms(cc);
+
 	if (cc->bs)
 		bioset_free(cc->bs);

@@ -1414,7 +1409,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	struct crypt_config *cc = ti->private;
 	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
-	int cpu, ret = -EINVAL;
+	int ret = -EINVAL;
 	char dummy;

 	/* Convert to crypto api definition? */
@@ -1455,8 +1450,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");

-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
-				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
				 __alignof__(struct crypt_cpu));
 	if (!cc->cpu) {
 		ti->error = "Cannot allocate per cpu state";
@@ -1489,12 +1483,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}

 	/* Allocate cipher */
-	for_each_possible_cpu(cpu) {
-		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
-		if (ret < 0) {
-			ti->error = "Error allocating crypto tfm";
-			goto bad;
-		}
+	ret = crypt_alloc_tfms(cc, cipher_api);
+	if (ret < 0) {
+		ti->error = "Error allocating crypto tfm";
+		goto bad;
 	}

 	/* Initialize and set key */
@@ -1702,7 +1694,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}

 	ti->num_flush_requests = 1;
-	ti->discard_zeroes_data_unsupported = 1;
+	ti->discard_zeroes_data_unsupported = true;

 	return 0;

@@ -1715,7 +1707,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
 {
 	struct dm_crypt_io *io;
-	struct crypt_config *cc;
+	struct crypt_config *cc = ti->private;

 	/*
 	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
@@ -1723,14 +1715,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	 * - for REQ_DISCARD caller must use flush if IO ordering matters
 	 */
 	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
-		cc = ti->private;
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
 		return DM_MAPIO_REMAPPED;
 	}

-	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
+	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
@@ -1742,7 +1733,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 }

 static int crypt_status(struct dm_target *ti, status_type_t type,
-			char *result, unsigned int maxlen)
+			unsigned status_flags, char *result, unsigned maxlen)
 {
 	struct crypt_config *cc = ti->private;
 	unsigned int sz = 0;