dm-crypt.c

/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t sector;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct dm_target *target;
        struct bio *base_bio;
        struct work_struct work;

        struct convert_context ctx;

        atomic_t pending;
        int error;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        const char *(*status)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
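
/*
 * The IV operations above form a small vtable: when a mapping is created,
 * crypt_ctr() below picks one of the crypt_iv_*_ops tables by name and, if
 * present, calls ->ctr() with the options following the colon in the cipher
 * string; ->generator() is then invoked once per 512-byte sector during
 * conversion, and ->dtr() releases any per-mapping state when the target
 * is destroyed.
 */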
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data and
         * for encryption buffer pages
         */
        mempool_t *io_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;

        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
        union {
                struct crypto_cipher *essiv_tfm;
                int benbi_shift;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        char cipher[CRYPTO_MAX_ALG_NAME];
        char chainmode[CRYPTO_MAX_ALG_NAME];
        struct crypto_blkcipher *tfm;
        unsigned long flags;
        unsigned int key_size;
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
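
/*
 * Worked illustration (example values, not taken from this file): with a
 * 16-byte IV, the "plain" generator turns sector 0x11223344 into
 *
 *   44 33 22 11 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * (little-endian 32-bit sector number, zero padded), whereas "essiv"
 * computes IV = E_salt(sector) with salt = H(key), H being the digest
 * named after "essiv:" in the cipher string.
 */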
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

        return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm;
        struct crypto_hash *hash_tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        unsigned int saltsize;
        u8 *salt;
        int err;

        if (opts == NULL) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Hash the cipher key with the given hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                return PTR_ERR(hash_tfm);
        }

        saltsize = crypto_hash_digestsize(hash_tfm);
        salt = kmalloc(saltsize, GFP_KERNEL);
        if (salt == NULL) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                crypto_free_hash(hash_tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
        crypto_free_hash(hash_tfm);

        if (err) {
                ti->error = "Error calculating hash in ESSIV";
                kfree(salt);
                return err;
        }

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                kfree(salt);
                return PTR_ERR(essiv_tfm);
        }
        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_blkcipher_ivsize(cc->tfm)) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return -EINVAL;
        }
        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return err;
        }
        kfree(salt);

        cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
        cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
        return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count, we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cipher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cipher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi_shift = 9 - log;

        return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

        val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}
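
/*
 * Example (illustrative numbers): for a cipher with 16-byte blocks,
 * log = 4 and benbi_shift = 5, so each 512-byte sector spans 32 narrow
 * blocks and sector s yields the big-endian count 32 * s + 1 in the low
 * 8 bytes of the IV.
 */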
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
        u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
                .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
        };
        int r;

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, sector);
                if (r < 0)
                        return r;

                if (write)
                        r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
        } else {
                if (write)
                        r = crypto_blkcipher_encrypt(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt(&desc, out, in, length);
        }

        return r;
}

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
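/*
 * crypt_convert() walks bio_in and bio_out one 512-byte sector at a time,
 * building a single-entry scatterlist for each side, and bumps ctx->sector
 * after every chunk so that each sector is processed with its own IV.
 */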
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r = 0;

        while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
                struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
                struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
                struct scatterlist sg_in, sg_out;

                sg_init_table(&sg_in, 1);
                sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

                sg_init_table(&sg_out, 1);
                sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

                ctx->offset_in += sg_in.length;
                if (ctx->offset_in >= bv_in->bv_len) {
                        ctx->offset_in = 0;
                        ctx->idx_in++;
                }

                ctx->offset_out += sg_out.length;
                if (ctx->offset_out >= bv_out->bv_len) {
                        ctx->offset_out = 0;
                        ctx->idx_out++;
                }

                r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
                                              bio_data_dir(ctx->bio_in) == WRITE, ctx->sector);
                if (r < 0)
                        break;

                ctx->sector++;
        }

        return r;
}
static void dm_crypt_bio_destructor(struct bio *bio)
{
        struct dm_crypt_io *io = bio->bi_private;
        struct crypt_config *cc = io->target->private;

        bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page)
                        break;

                /*
                 * if additional pages cannot be allocated without waiting,
                 * return a partially allocated bio, the caller will then try
                 * to allocate additional bios while submitting this partial bio
                 */
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                if (!bio_add_page(clone, page, len, 0)) {
                        mempool_free(page, cc->page_pool);
                        break;
                }

                size -= len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
        struct crypt_config *cc = (struct crypt_config *) io->target->private;

        if (error < 0)
                io->error = error;

        if (!atomic_dec_and_test(&io->pending))
                return;

        bio_endio(io->base_bio, io->error);

        mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
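
/*
 * Resulting flow: a READ is first queued on io_queue, where process_read()
 * clones the bio and submits it; when the clone completes, crypt_endio()
 * re-queues the io on crypt_queue and process_read_endio() decrypts the
 * data in place.  A WRITE goes straight to crypt_queue, where
 * process_write() encrypts into freshly allocated pages and only then
 * submits the clone(s).
 */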
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->target->private;
        unsigned read_io = bio_data_dir(clone) == READ;

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
                error = -EIO;

        /*
         * free the processed pages
         */
        if (!read_io) {
                crypt_free_buffer_pages(cc, clone);
                goto out;
        }

        if (unlikely(error))
                goto out;

        bio_put(clone);
        kcryptd_queue_crypt(io);
        return;

out:
        bio_put(clone);
        crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->target->private;

        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
        clone->bi_bdev = cc->dev->bdev;
        clone->bi_rw = io->base_bio->bi_rw;
        clone->bi_destructor = dm_crypt_bio_destructor;
}
static void process_read(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
        if (unlikely(!clone)) {
                crypt_dec_pending(io, -ENOMEM);
                return;
        }

        clone_init(io, clone);
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);

        generic_make_request(clone);
}
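
/*
 * process_write() may have to split the request: crypt_alloc_buffer() can
 * return a bio that covers only part of the remaining data, in which case
 * another reference on io->pending is taken for each extra clone so the
 * base bio is only completed once every clone has finished.
 */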
static void process_write(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        unsigned remaining = base_bio->bi_size;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        crypt_convert_init(cc, &io->ctx, NULL, base_bio, sector);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        crypt_dec_pending(io, -ENOMEM);
                        return;
                }

                io->ctx.bio_out = clone;
                io->ctx.idx_out = 0;

                if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
                        crypt_free_buffer_pages(cc, clone);
                        bio_put(clone);
                        crypt_dec_pending(io, -EIO);
                        return;
                }

                /* crypt_convert should have filled the clone bio */
                BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

                clone->bi_sector = cc->start + sector;
                remaining -= clone->bi_size;
                sector += bio_sectors(clone);

                /* Grab another reference to the io struct
                 * before we kick off the request */
                if (remaining)
                        atomic_inc(&io->pending);

                generic_make_request(clone);

                /* Do not reference clone after this - it
                 * may be gone already. */

                /* out of memory -> run queues */
                if (remaining)
                        congestion_wait(WRITE, HZ/100);
        }
}
static void process_read_endio(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->base_bio->bi_sector - io->target->begin);

        crypt_dec_pending(io, crypt_convert(cc, &io->ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read_endio(io);
        else
                process_write(io);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        char *endp;
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                key[i] = (u8)simple_strtoul(buffer, &endp, 16);

                if (endp != &buffer[2])
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}
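
/*
 * For example (illustrative key material only): a 128-bit key arrives on
 * the table line as 32 hex characters, so "000102030405060708090a0b0c0d0e0f"
 * decodes to the 16 bytes 00 01 02 ... 0f; an odd-length or non-hex string
 * is rejected with -EINVAL.
 */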
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                sprintf(hex, "%02x", *key);
                hex += 2;
                key++;
        }
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        unsigned key_size = strlen(key) >> 1;

        if (cc->key_size && cc->key_size != key_size)
                return -EINVAL;

        cc->key_size = key_size; /* initial settings */

        if ((!key_size && strcmp(key, "-")) ||
            (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
                return -EINVAL;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));
        return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
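/*
 * For illustration (device names and sizes are hypothetical), a table line
 * such as
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits of key> 0 /dev/sda2 8192
 *
 * is parsed below as cipher "aes", chainmode "cbc", ivmode "essiv" with
 * ivopts "sha256", followed by the hex key, the iv_offset, the backing
 * device and the start sector on that device.
 */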
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        struct crypto_blkcipher *tfm;
        char *tmp;
        char *cipher;
        char *chainmode;
        char *ivmode;
        char *ivopts;
        unsigned int key_size;
        unsigned long long tmpll;

        if (argc != 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        tmp = argv[0];
        cipher = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Unexpected additional cipher options");

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (cc == NULL) {
                ti->error =
                        "Cannot allocate transparent encryption context";
                return -ENOMEM;
        }

        if (crypt_set_key(cc, argv[1])) {
                ti->error = "Error decoding key";
                goto bad_cipher;
        }
        /* Compatibility mode for old dm-crypt cipher strings */
        if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "This chaining mode requires an IV mechanism";
                goto bad_cipher;
        }

        if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
                ti->error = "Chain mode + cipher name is too long";
                goto bad_cipher;
        }

        tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                ti->error = "Error allocating crypto tfm";
                goto bad_cipher;
        }

        strcpy(cc->cipher, cipher);
        strcpy(cc->chainmode, chainmode);
        cc->tfm = tfm;

        /*
         * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
         * See comments at iv code
         */

        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad_ivmode;
        }

        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
            cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
                goto bad_ivmode;

        cc->iv_size = crypto_blkcipher_ivsize(tfm);
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else {
                if (cc->iv_gen_ops) {
                        DMWARN("Selected cipher does not support IVs");
                        if (cc->iv_gen_ops->dtr)
                                cc->iv_gen_ops->dtr(cc);
                        cc->iv_gen_ops = NULL;
                }
        }

        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad_slab_pool;
        }

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad_page_pool;
        }

        cc->bs = bioset_create(MIN_IOS, MIN_IOS);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
        }

        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad_device;
        }

        if (sscanf(argv[2], "%llu", &tmpll) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad_device;
        }
        cc->iv_offset = tmpll;

        if (sscanf(argv[4], "%llu", &tmpll) != 1) {
                ti->error = "Invalid device sector";
                goto bad_device;
        }
        cc->start = tmpll;

        if (dm_get_device(ti, argv[3], cc->start, ti->len,
                          dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad_device;
        }

        if (ivmode && cc->iv_gen_ops) {
                if (ivopts)
                        *(ivopts - 1) = ':';
                cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
                if (!cc->iv_mode) {
                        ti->error = "Error kmallocing iv_mode string";
                        goto bad_ivmode_string;
                }
                strcpy(cc->iv_mode, ivmode);
        } else
                cc->iv_mode = NULL;

        cc->io_queue = create_singlethread_workqueue("kcryptd_io");
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad_io_queue;
        }

        cc->crypt_queue = create_singlethread_workqueue("kcryptd");
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad_crypt_queue;
        }

        ti->private = cc;
        return 0;

bad_crypt_queue:
        destroy_workqueue(cc->io_queue);
bad_io_queue:
        kfree(cc->iv_mode);
bad_ivmode_string:
        dm_put_device(ti, cc->dev);
bad_device:
        bioset_free(cc->bs);
bad_bs:
        mempool_destroy(cc->page_pool);
bad_page_pool:
        mempool_destroy(cc->io_pool);
bad_slab_pool:
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
bad_ivmode:
        crypto_free_blkcipher(tfm);
bad_cipher:
        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
        return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;

        destroy_workqueue(cc->io_queue);
        destroy_workqueue(cc->crypt_queue);

        bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->io_pool);

        kfree(cc->iv_mode);
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
        crypto_free_blkcipher(cc->tfm);
        dm_put_device(ti, cc->dev);

        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
        struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->error = 0;
        atomic_set(&io->pending, 0);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);
        else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                if (cc->iv_mode)
                        DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
                               cc->iv_mode);
                else
                        DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
                                return -ENOMEM;

                        crypt_encode_key(result + sz, cc->key, cc->key_size);
                        sz += cc->key_size << 1;
                } else {
                        if (sz >= maxlen)
                                return -ENOMEM;
                        result[sz++] = '-';
                }

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                       cc->dev->name, (unsigned long long)cc->start);
                break;
        }
        return 0;
}
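
/*
 * STATUSTYPE_TABLE therefore reproduces the constructor arguments, e.g.
 * (hypothetical values) "aes-cbc-essiv:sha256 <hex key> 0 253:0 8192",
 * with "-" emitted in place of the key when the key size is zero.
 */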
static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *      key set <key>
 *      key wipe
 */
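
/*
 * From userspace this is typically driven with (illustrative commands):
 *
 *   dmsetup suspend <device>
 *   dmsetup message <device> 0 key wipe
 *   dmsetup message <device> 0 key set <hex key>
 *   dmsetup resume <device>
 *
 * crypt_message() below rejects key changes unless the device is suspended.
 */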
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;

        if (argc < 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("key"))) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
                        return crypt_set_key(cc, argv[2]);
                if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
                        return crypt_wipe_key(cc);
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}

static struct target_type crypt_target = {
        .name        = "crypt",
        .version     = {1, 5, 0},
        .module      = THIS_MODULE,
        .ctr         = crypt_ctr,
        .dtr         = crypt_dtr,
        .map         = crypt_map,
        .status      = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume   = crypt_preresume,
        .resume      = crypt_resume,
        .message     = crypt_message,
};
static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_crypt_io_pool);
        }

        return r;
}

static void __exit dm_crypt_exit(void)
{
        int r = dm_unregister_target(&crypt_target);

        if (r < 0)
                DMERR("unregister failed %d", r);

        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");