dm-crypt.c

/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct dm_target *target;
        struct bio *base_bio;
        struct work_struct work;
        atomic_t pending;
        int error;
        int post_process;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t sector;
        int write;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        const char *(*status)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data and
         * for encryption buffer pages
         */
        mempool_t *io_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
        union {
                struct crypto_cipher *essiv_tfm;
                int benbi_shift;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        char cipher[CRYPTO_MAX_ALG_NAME];
        char chainmode[CRYPTO_MAX_ALG_NAME];
        struct crypto_blkcipher *tfm;
        unsigned long flags;
        unsigned int key_size;
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

        return 0;
}
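
/*
 * Worked example (editor's note, not in the original source): with a
 * 16-byte IV and sector number 0x11223344aabbccdd, only the low 32 bits
 * survive the mask, so the generated IV is
 * dd cc bb aa 00 00 00 00 00 00 00 00 00 00 00 00.
 */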

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm;
        struct crypto_hash *hash_tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        unsigned int saltsize;
        u8 *salt;
        int err;

        if (opts == NULL) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Hash the cipher key with the given hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                return PTR_ERR(hash_tfm);
        }

        saltsize = crypto_hash_digestsize(hash_tfm);
        salt = kmalloc(saltsize, GFP_KERNEL);
        if (salt == NULL) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                crypto_free_hash(hash_tfm);
                return -ENOMEM;
        }

        sg_set_buf(&sg, cc->key, cc->key_size);
        desc.tfm = hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
        crypto_free_hash(hash_tfm);

        if (err) {
                ti->error = "Error calculating hash in ESSIV";
                kfree(salt);
                return err;
        }

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                kfree(salt);
                return PTR_ERR(essiv_tfm);
        }
        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_blkcipher_ivsize(cc->tfm)) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return -EINVAL;
        }
        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return err;
        }
        kfree(salt);

        cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
        cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
        return 0;
}
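
/*
 * Illustration (editor's note): with "aes-cbc-essiv:sha256" the ctr
 * above sets the ESSIV key to salt = sha256(key), and the generator
 * computes IV = E_salt(sector), where the sector number is encoded
 * little-endian and zero-padded to the IV size before the
 * crypto_cipher_encrypt_one() call encrypts it in place.
 */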

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
        int log = ilog2(bs);

        /*
         * We need to calculate how far we must shift the sector count
         * to get the cipher block count; this shift is used by the
         * generator function below.
         */
        if (1 << log != bs) {
                ti->error = "cipher blocksize is not a power of 2";
                return -EINVAL;
        }
        if (log > 9) {
                ti->error = "cipher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi_shift = 9 - log;

        return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* the last 8 bytes are set below */

        val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}
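
/*
 * Worked example (editor's note): LRW-32-AES uses 16-byte cipher
 * blocks, so log = ilog2(16) = 4 and benbi_shift = 9 - 4 = 5; each
 * 512-byte sector spans 32 narrow blocks. For sector 2 the generator
 * therefore stores the big-endian 64-bit value (2 << 5) + 1 = 65 in
 * the last eight bytes of the IV.
 */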

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
        u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
                .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
        };
        int r;

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, sector);
                if (r < 0)
                        return r;

                if (write)
                        r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
        } else {
                if (write)
                        r = crypto_blkcipher_encrypt(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt(&desc, out, in, length);
        }

        return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
        ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r = 0;

        while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
                struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
                struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
                struct scatterlist sg_in = {
                        .page = bv_in->bv_page,
                        .offset = bv_in->bv_offset + ctx->offset_in,
                        .length = 1 << SECTOR_SHIFT
                };
                struct scatterlist sg_out = {
                        .page = bv_out->bv_page,
                        .offset = bv_out->bv_offset + ctx->offset_out,
                        .length = 1 << SECTOR_SHIFT
                };

                ctx->offset_in += sg_in.length;
                if (ctx->offset_in >= bv_in->bv_len) {
                        ctx->offset_in = 0;
                        ctx->idx_in++;
                }

                ctx->offset_out += sg_out.length;
                if (ctx->offset_out >= bv_out->bv_len) {
                        ctx->offset_out = 0;
                        ctx->idx_out++;
                }

                r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
                                              ctx->write, ctx->sector);
                if (r < 0)
                        break;

                ctx->sector++;
        }

        return r;
}
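
/*
 * Note (editor's note): each iteration of the loop above converts
 * exactly one 512-byte sector and then increments ctx->sector, so the
 * IV generator runs once per sector rather than once per bio.
 */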

static void dm_crypt_bio_destructor(struct bio *bio)
{
        struct dm_crypt_io *io = bio->bi_private;
        struct crypt_config *cc = io->target->private;

        bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned int i;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                struct bio_vec *bv = bio_iovec_idx(clone, i);

                bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!bv->bv_page)
                        break;

                /*
                 * if additional pages cannot be allocated without waiting,
                 * return a partially allocated bio, the caller will then try
                 * to allocate additional bios while submitting this partial bio
                 */
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                bv->bv_offset = 0;
                if (size > PAGE_SIZE)
                        bv->bv_len = PAGE_SIZE;
                else
                        bv->bv_len = size;

                clone->bi_size += bv->bv_len;
                clone->bi_vcnt++;
                size -= bv->bv_len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct dm_crypt_io *io, int error)
{
        struct crypt_config *cc = (struct crypt_config *) io->target->private;

        if (error < 0)
                io->error = error;

        if (!atomic_dec_and_test(&io->pending))
                return;

        bio_endio(io->base_bio, io->error);

        mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 */
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(_kcryptd_workqueue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->target->private;
        unsigned read_io = bio_data_dir(clone) == READ;

        /*
         * free the processed pages
         */
        if (!read_io) {
                crypt_free_buffer_pages(cc, clone);
                goto out;
        }

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
                error = -EIO;
                goto out;
        }

        bio_put(clone);
        io->post_process = 1;
        kcryptd_queue_io(io);
        return;

out:
        bio_put(clone);
        dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->target->private;

        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
        clone->bi_bdev = cc->dev->bdev;
        clone->bi_rw = io->base_bio->bi_rw;
        clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
        if (unlikely(!clone)) {
                dec_pending(io, -ENOMEM);
                return;
        }

        clone_init(io, clone);
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);

        generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        struct convert_context ctx;
        unsigned remaining = base_bio->bi_size;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        dec_pending(io, -ENOMEM);
                        return;
                }

                ctx.bio_out = clone;
                ctx.idx_out = 0;

                if (unlikely(crypt_convert(cc, &ctx) < 0)) {
                        crypt_free_buffer_pages(cc, clone);
                        bio_put(clone);
                        dec_pending(io, -EIO);
                        return;
                }

                /* crypt_convert should have filled the clone bio */
                BUG_ON(ctx.idx_out < clone->bi_vcnt);

                clone->bi_sector = cc->start + sector;
                remaining -= clone->bi_size;
                sector += bio_sectors(clone);

                /* Grab another reference to the io struct
                 * before we kick off the request */
                if (remaining)
                        atomic_inc(&io->pending);

                generic_make_request(clone);

                /* Do not reference clone after this - it
                 * may be gone already. */

                /* out of memory -> run queues */
                if (remaining)
                        congestion_wait(WRITE, HZ/100);
        }
}

static void process_read_endio(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct convert_context ctx;

        crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
                           io->base_bio->bi_sector - io->target->begin, 0);

        dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (io->post_process)
                process_read_endio(io);
        else if (bio_data_dir(io->base_bio) == READ)
                process_read(io);
        else
                process_write(io);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        char *endp;
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                key[i] = (u8)simple_strtoul(buffer, &endp, 16);

                if (endp != &buffer[2])
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                sprintf(hex, "%02x", *key);
                hex += 2;
                key++;
        }
}
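
/*
 * Round-trip example (editor's note): crypt_decode_key(key, "2f8a", 2)
 * yields key[] = { 0x2f, 0x8a }, and crypt_encode_key() turns that back
 * into the string "2f8a"; a key string must therefore be exactly
 * 2 * key_size hex digits long.
 */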

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        unsigned key_size = strlen(key) >> 1;

        if (cc->key_size && cc->key_size != key_size)
                return -EINVAL;

        cc->key_size = key_size; /* initial settings */

        if ((!key_size && strcmp(key, "-")) ||
            (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
                return -EINVAL;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        return 0;
}
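
/*
 * Note (editor's note): for a key argument of "-", strlen("-") >> 1
 * is 0, and the strcmp() above then accepts "-" as the only valid
 * spelling of an empty key; any other argument must decode as hex.
 */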

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));
        return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
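
/*
 * Illustrative table line (editor's note; device, size and key are
 * made-up examples): mapping 1 GiB (2097152 sectors) of /dev/sdb1 with
 * 256-bit AES in CBC mode and ESSIV:
 *
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0
 *
 * argv[] below receives only the five fields after the "crypt" target
 * name, i.e. <cipher> <key> <iv_offset> <dev_path> <start>.
 */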

static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        struct crypto_blkcipher *tfm;
        char *tmp;
        char *cipher;
        char *chainmode;
        char *ivmode;
        char *ivopts;
        unsigned int key_size;
        unsigned long long tmpll;

        if (argc != 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        tmp = argv[0];
        cipher = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Unexpected additional cipher options");
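
        /*
         * Parsing example (editor's note): for argv[0] =
         * "aes-cbc-essiv:sha256", the strsep() calls above produce
         * cipher = "aes", chainmode = "cbc" and ivopts = "essiv:sha256";
         * the final strsep() then splits that into ivmode = "essiv"
         * and ivopts = "sha256".
         */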

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (cc == NULL) {
                ti->error = "Cannot allocate transparent encryption context";
                return -ENOMEM;
        }

        if (crypt_set_key(cc, argv[1])) {
                ti->error = "Error decoding key";
                goto bad1;
        }

        /* Compatibility mode for old dm-crypt cipher strings */
        if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "This chaining mode requires an IV mechanism";
                goto bad1;
        }

        if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
                     cipher) >= CRYPTO_MAX_ALG_NAME) {
                ti->error = "Chain mode + cipher name is too long";
                goto bad1;
        }

        tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                ti->error = "Error allocating crypto tfm";
                goto bad1;
        }

        strcpy(cc->cipher, cipher);
        strcpy(cc->chainmode, chainmode);
        cc->tfm = tfm;

        /*
         * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
         * See comments at iv code
         */
        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad2;
        }

        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
            cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
                goto bad2;

        cc->iv_size = crypto_blkcipher_ivsize(tfm);
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else {
                if (cc->iv_gen_ops) {
                        DMWARN("Selected cipher does not support IVs");
                        if (cc->iv_gen_ops->dtr)
                                cc->iv_gen_ops->dtr(cc);
                        cc->iv_gen_ops = NULL;
                }
        }

        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad3;
        }

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad4;
        }

        cc->bs = bioset_create(MIN_IOS, MIN_IOS);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
        }

        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad5;
        }

        if (sscanf(argv[2], "%llu", &tmpll) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad5;
        }
        cc->iv_offset = tmpll;

        if (sscanf(argv[4], "%llu", &tmpll) != 1) {
                ti->error = "Invalid device sector";
                goto bad5;
        }
        cc->start = tmpll;

        if (dm_get_device(ti, argv[3], cc->start, ti->len,
                          dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad5;
        }

        if (ivmode && cc->iv_gen_ops) {
                if (ivopts)
                        *(ivopts - 1) = ':';
                cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
                if (!cc->iv_mode) {
                        ti->error = "Error kmallocing iv_mode string";
                        goto bad5;
                }
                strcpy(cc->iv_mode, ivmode);
        } else
                cc->iv_mode = NULL;

        ti->private = cc;
        return 0;

bad5:
        bioset_free(cc->bs);
bad_bs:
        mempool_destroy(cc->page_pool);
bad4:
        mempool_destroy(cc->io_pool);
bad3:
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
bad2:
        crypto_free_blkcipher(tfm);
bad1:
        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
        return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;

        flush_workqueue(_kcryptd_workqueue);

        bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->io_pool);

        kfree(cc->iv_mode);
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
        crypto_free_blkcipher(cc->tfm);
        dm_put_device(ti, cc->dev);

        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
        struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->error = io->post_process = 0;
        atomic_set(&io->pending, 0);
        kcryptd_queue_io(io);

        return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                if (cc->iv_mode)
                        DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
                               cc->iv_mode);
                else
                        DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
                                return -ENOMEM;

                        crypt_encode_key(result + sz, cc->key, cc->key_size);
                        sz += cc->key_size << 1;
                } else {
                        if (sz >= maxlen)
                                return -ENOMEM;
                        result[sz++] = '-';
                }

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                       cc->dev->name, (unsigned long long)cc->start);
                break;
        }
        return 0;
}
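
/*
 * Example output (editor's note): for the illustrative table line shown
 * above crypt_ctr(), STATUSTYPE_TABLE would emit something like
 *
 *   aes-cbc-essiv:sha256 <64 hex digits> 0 8:17 0
 *
 * assuming cc->dev->name holds the device in major:minor form.
 */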

static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *      key set <key>
 *      key wipe
 */
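
/*
 * Example (editor's note): with the mapping suspended, the handler
 * below is driven by e.g.
 *
 *   dmsetup message <device> 0 key wipe
 *   dmsetup message <device> 0 key set <64 hex digits>
 *
 * where the 0 is the sector argument dmsetup requires for target
 * messages.
 */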

static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;

        if (argc < 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("key"))) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
                        return crypt_set_key(cc, argv[2]);
                if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
                        return crypt_wipe_key(cc);
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}

static struct target_type crypt_target = {
        .name        = "crypt",
        .version     = {1, 5, 0},
        .module      = THIS_MODULE,
        .ctr         = crypt_ctr,
        .dtr         = crypt_dtr,
        .map         = crypt_map,
        .status      = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume   = crypt_preresume,
        .resume      = crypt_resume,
        .message     = crypt_message,
};

static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        _kcryptd_workqueue = create_workqueue("kcryptd");
        if (!_kcryptd_workqueue) {
                r = -ENOMEM;
                DMERR("couldn't create kcryptd");
                goto bad1;
        }

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                goto bad2;
        }

        return 0;

bad2:
        destroy_workqueue(_kcryptd_workqueue);
bad1:
        kmem_cache_destroy(_crypt_io_pool);
        return r;
}

static void __exit dm_crypt_exit(void)
{
        int r = dm_unregister_target(&crypt_target);

        if (r < 0)
                DMERR("unregister failed %d", r);

        destroy_workqueue(_kcryptd_workqueue);
        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");