/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
	atomic_t pending;
	int error;
	int post_process;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
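
/*
 * Concrete values, derived from the generators below (assuming a
 * 16-byte IV): for sector 1, "plain" produces
 *   01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 * (32-bit little-endian sector number, zero padded), "essiv" encrypts
 * the 64-bit little-endian sector number with the salt-derived key,
 * and "null" is all zeros regardless of the sector.
 */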
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
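
/*
 * Worked example: AES has a 16-byte block size, so log = ilog2(16) = 4
 * and benbi_shift = 9 - 4 = 5; each 512-byte sector then covers
 * 2^5 = 32 cipher blocks, and crypt_iv_benbi_gen() below numbers the
 * first block of sector N as (N << 5) + 1.
 */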
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
		   struct bio *bio_out, struct bio *bio_in,
		   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(clone, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc,
				    struct bio *clone, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
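	/*
	 * Worked example: for a clone of three full pages with
	 * 2 * PAGE_SIZE bytes still outstanding (bi_size),
	 * end = (2 << PAGE_SHIFT) + PAGE_SIZE - 2 * PAGE_SIZE = PAGE_SIZE;
	 * freeing bytes = PAGE_SIZE then gives start = 0 and end = 1,
	 * i.e. exactly the first (completed) page is returned to the pool.
	 */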
	i = clone->bi_vcnt - 1;
	bv = bio_iovec_idx(clone, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!clone->bi_size)
		end = clone->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for (i = start; i < end; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 */
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(_kcryptd_workqueue, &io->work);
}
static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
	 * free the processed pages, even if
	 * it's only a partially completed write
	 */
	if (!read_io)
		crypt_free_buffer_pages(cc, clone, done);

	/* keep going - not finished yet */
	if (unlikely(clone->bi_size))
		return 1;

	if (!read_io)
		goto out;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	return 0;

out:
	bio_put(clone);
	dec_pending(io, error);
	return error;
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}
static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone, clone->bi_size);
			bio_put(clone);
			dec_pending(io, -EIO);
			return;
		}

		/* crypt_convert should have filled the clone bio */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* Grab another reference to the io struct
		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* Do not reference clone after this - it
		 * may be gone already. */

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(WRITE, HZ/100);
	}
}
static void process_read_endio(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
	else
		process_write(io);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
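
/*
 * Example: crypt_decode_key(key, "0a1b2c3d", 4) fills key with
 * { 0x0a, 0x1b, 0x2c, 0x3d }; any non-hex character makes
 * simple_strtoul() stop short of &buffer[2], so the call returns
 * -EINVAL.
 */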
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
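
/*
 * Illustrative table line (the device name "cryptvol" and the key are
 * placeholders):
 *
 *   echo "0 $(blockdev --getsz /dev/sdb1) crypt aes-cbc-essiv:sha256 \
 *       <64 hex digits> 0 /dev/sdb1 0" | dmsetup create cryptvol
 *
 * maps the whole of /dev/sdb1 as an encrypted device named "cryptvol",
 * with no extra IV offset and no offset into the underlying device.
 */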
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			/* drop the reference taken by dm_get_device() above */
			dm_put_device(ti, cc->dev);
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));

	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = io->post_process = 0;
	atomic_set(&io->pending, 0);
	kcryptd_queue_io(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
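
/*
 * Illustrative use from userspace ("cryptvol" is a placeholder name):
 *
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <hex key>
 *   dmsetup resume cryptvol
 *
 * crypt_message() below rejects key manipulation unless the device is
 * suspended.
 */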
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 5, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");