/* crypto/xcbc.c — XCBC keyed hash (MAC) template */
  1. /*
  2. * Copyright (C)2006 USAGI/WIDE Project
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. *
  18. * Author:
  19. * Kazunori Miyazawa <miyazawa@linux-ipv6.org>
  20. */
  21. #include <linux/crypto.h>
  22. #include <linux/err.h>
  23. #include <linux/hardirq.h>
  24. #include <linux/kernel.h>
  25. #include <linux/mm.h>
  26. #include <linux/rtnetlink.h>
  27. #include <linux/slab.h>
  28. #include <linux/scatterlist.h>
  29. #include "internal.h"
/*
 * Constant blocks used to derive the three XCBC subkeys (RFC 3566):
 * K1 = E_K(0x01^16), K2 = E_K(0x02^16), K3 = E_K(0x03^16).
 * Stored as three consecutive 16-byte blocks (ctx->consts points here);
 * word endianness is irrelevant since every byte of a block is the same.
 */
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
			   0x02020202, 0x02020202, 0x02020202, 0x02020202,
			   0x03030303, 0x03030303, 0x03030303, 0x03030303};
/*
 * Memory layout around the hash context (cra_ctxsize is set in
 * xcbc_alloc(); the trailing storage is carved up in xcbc_init_tfm()):
 *
 * +------------------------
 * | <parent tfm>
 * +------------------------
 * | crypto_xcbc_ctx
 * +------------------------
 * | odds (block size)
 * +------------------------
 * | prev (block size)
 * +------------------------
 * | key (block size)
 * +------------------------
 * | consts (block size * 3)
 * +------------------------
 *
 * NOTE(review): despite the diagram, ->consts is never placed in the
 * trailing storage — crypto_xcbc_digest_setkey() points it at the static
 * ks[] table, and xcbc_alloc() only reserves 3 * blocksize extra bytes
 * (odds/prev/key).  Confirm before relying on the diagram.
 */
struct crypto_xcbc_ctx {
	struct crypto_tfm *child;	/* underlying block cipher transform */
	u8 *odds;			/* buffer for a partial (or final full) block */
	u8 *prev;			/* CBC chaining value / running MAC state */
	u8 *key;			/* copy of the user-supplied key K */
	u8 *consts;			/* K1/K2/K3 derivation constants (ks[]) */
	void (*xor)(u8 *a, const u8 *b, unsigned int bs);	/* a ^= b over bs bytes */
	unsigned int keylen;		/* bytes in *key (== cipher block size) */
	unsigned int len;		/* bytes currently buffered in *odds */
};
  58. static void xor_128(u8 *a, const u8 *b, unsigned int bs)
  59. {
  60. ((u32 *)a)[0] ^= ((u32 *)b)[0];
  61. ((u32 *)a)[1] ^= ((u32 *)b)[1];
  62. ((u32 *)a)[2] ^= ((u32 *)b)[2];
  63. ((u32 *)a)[3] ^= ((u32 *)b)[3];
  64. }
  65. static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
  66. struct crypto_xcbc_ctx *ctx)
  67. {
  68. int bs = crypto_hash_blocksize(parent);
  69. int err = 0;
  70. u8 key1[bs];
  71. if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
  72. return err;
  73. ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
  74. ctx->consts);
  75. return crypto_cipher_setkey(ctx->child, key1, bs);
  76. }
  77. static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
  78. const u8 *inkey, unsigned int keylen)
  79. {
  80. struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
  81. if (keylen != crypto_tfm_alg_blocksize(ctx->child))
  82. return -EINVAL;
  83. ctx->keylen = keylen;
  84. memcpy(ctx->key, inkey, keylen);
  85. ctx->consts = (u8*)ks;
  86. return _crypto_xcbc_digest_setkey(parent, ctx);
  87. }
  88. static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
  89. {
  90. struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
  91. int bs = crypto_hash_blocksize(pdesc->tfm);
  92. ctx->len = 0;
  93. memset(ctx->odds, 0, bs);
  94. memset(ctx->prev, 0, bs);
  95. return 0;
  96. }
/*
 * Worker for .update: feed @nbytes of scatterlist data into the XCBC
 * state.  Full blocks are CBC-absorbed into ctx->prev via the child
 * cipher; a trailing partial block — or a trailing *exact* block — is
 * buffered in ctx->odds for special treatment at finalisation time, as
 * XCBC requires.  Always returns 0.
 */
static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
				      struct scatterlist *sg,
				      unsigned int nbytes)
{
	struct crypto_hash *parent = pdesc->tfm;
	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
	struct crypto_tfm *tfm = ctx->child;
	int bs = crypto_hash_blocksize(parent);
	unsigned int i = 0;

	/* Walk the scatterlist one entry at a time. */
	do {
		struct page *pg = sg[i].page;
		unsigned int offset = sg[i].offset;
		unsigned int slen = sg[i].length;

		while (slen > 0) {
			/* Bytes of this entry that live in the current page. */
			unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
			char *p = crypto_kmap(pg, 0) + offset;

			/* checking the data can fill the block */
			if ((ctx->len + len) <= bs) {
				/* Not enough for a full block yet: just buffer. */
				memcpy(ctx->odds + ctx->len, p, len);
				ctx->len += len;
				slen -= len;

				/* checking the rest of the page */
				if (len + offset >= PAGE_SIZE) {
					offset = 0;
					pg++;
				} else
					offset += len;

				crypto_kunmap(p, 0);
				crypto_yield(tfm->crt_flags);
				continue;
			}

			/* filling odds with new data and encrypting it */
			memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
			len -= bs - ctx->len;
			p += bs - ctx->len;

			ctx->xor(ctx->prev, ctx->odds, bs);
			tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);

			/* clearing the length */
			ctx->len = 0;

			/*
			 * encrypting the rest of data — strictly '>' so a
			 * final exact block stays buffered in odds for the
			 * K2/K3 handling in crypto_xcbc_digest_final().
			 */
			while (len > bs) {
				ctx->xor(ctx->prev, p, bs);
				tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
				p += bs;
				len -= bs;
			}

			/* keeping the surplus of blocksize */
			if (len) {
				memcpy(ctx->odds, p, len);
				ctx->len = len;
			}

			/*
			 * NOTE(review): p has been advanced well past the
			 * address crypto_kmap() returned; unmapping the
			 * advanced pointer looks wrong — confirm against the
			 * kmap API (later upstream versions of this file
			 * unmap the original mapping).
			 */
			crypto_kunmap(p, 0);
			crypto_yield(tfm->crt_flags);
			slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
			offset = 0;
			pg++;
		}

		/*
		 * NOTE(review): nbytes is unsigned; if an entry's length
		 * exceeds the remaining nbytes this subtraction wraps
		 * instead of terminating the loop — presumably callers
		 * guarantee the sg lengths sum exactly to nbytes; verify.
		 */
		nbytes -= sg[i].length;
		i++;
	} while (nbytes > 0);

	return 0;
}
  159. static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
  160. struct scatterlist *sg,
  161. unsigned int nbytes)
  162. {
  163. if (WARN_ON_ONCE(in_irq()))
  164. return -EDEADLK;
  165. return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
  166. }
  167. static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
  168. {
  169. struct crypto_hash *parent = pdesc->tfm;
  170. struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
  171. struct crypto_tfm *tfm = ctx->child;
  172. int bs = crypto_hash_blocksize(parent);
  173. int err = 0;
  174. if (ctx->len == bs) {
  175. u8 key2[bs];
  176. if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
  177. return err;
  178. tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
  179. ctx->xor(ctx->prev, ctx->odds, bs);
  180. ctx->xor(ctx->prev, key2, bs);
  181. _crypto_xcbc_digest_setkey(parent, ctx);
  182. tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
  183. } else {
  184. u8 key3[bs];
  185. unsigned int rlen;
  186. u8 *p = ctx->odds + ctx->len;
  187. *p = 0x80;
  188. p++;
  189. rlen = bs - ctx->len -1;
  190. if (rlen)
  191. memset(p, 0, rlen);
  192. if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
  193. return err;
  194. tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
  195. ctx->xor(ctx->prev, ctx->odds, bs);
  196. ctx->xor(ctx->prev, key3, bs);
  197. _crypto_xcbc_digest_setkey(parent, ctx);
  198. tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
  199. }
  200. return 0;
  201. }
  202. static int crypto_xcbc_digest(struct hash_desc *pdesc,
  203. struct scatterlist *sg, unsigned int nbytes, u8 *out)
  204. {
  205. if (WARN_ON_ONCE(in_irq()))
  206. return -EDEADLK;
  207. crypto_xcbc_digest_init(pdesc);
  208. crypto_xcbc_digest_update2(pdesc, sg, nbytes);
  209. return crypto_xcbc_digest_final(pdesc, out);
  210. }
  211. static int xcbc_init_tfm(struct crypto_tfm *tfm)
  212. {
  213. struct crypto_instance *inst = (void *)tfm->__crt_alg;
  214. struct crypto_spawn *spawn = crypto_instance_ctx(inst);
  215. struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
  216. int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
  217. tfm = crypto_spawn_tfm(spawn);
  218. if (IS_ERR(tfm))
  219. return PTR_ERR(tfm);
  220. switch(bs) {
  221. case 16:
  222. ctx->xor = xor_128;
  223. break;
  224. default:
  225. return -EINVAL;
  226. }
  227. ctx->child = crypto_cipher_cast(tfm);
  228. ctx->odds = (u8*)(ctx+1);
  229. ctx->prev = ctx->odds + bs;
  230. ctx->key = ctx->prev + bs;
  231. return 0;
  232. };
  233. static void xcbc_exit_tfm(struct crypto_tfm *tfm)
  234. {
  235. struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
  236. crypto_free_cipher(ctx->child);
  237. }
  238. static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
  239. {
  240. struct crypto_instance *inst;
  241. struct crypto_alg *alg;
  242. alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
  243. CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
  244. if (IS_ERR(alg))
  245. return ERR_PTR(PTR_ERR(alg));
  246. switch(alg->cra_blocksize) {
  247. case 16:
  248. break;
  249. default:
  250. return ERR_PTR(PTR_ERR(alg));
  251. }
  252. inst = crypto_alloc_instance("xcbc", alg);
  253. if (IS_ERR(inst))
  254. goto out_put_alg;
  255. inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
  256. inst->alg.cra_priority = alg->cra_priority;
  257. inst->alg.cra_blocksize = alg->cra_blocksize;
  258. inst->alg.cra_alignmask = alg->cra_alignmask;
  259. inst->alg.cra_type = &crypto_hash_type;
  260. inst->alg.cra_hash.digestsize =
  261. (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
  262. CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
  263. alg->cra_blocksize;
  264. inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
  265. ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
  266. inst->alg.cra_init = xcbc_init_tfm;
  267. inst->alg.cra_exit = xcbc_exit_tfm;
  268. inst->alg.cra_hash.init = crypto_xcbc_digest_init;
  269. inst->alg.cra_hash.update = crypto_xcbc_digest_update;
  270. inst->alg.cra_hash.final = crypto_xcbc_digest_final;
  271. inst->alg.cra_hash.digest = crypto_xcbc_digest;
  272. inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
  273. out_put_alg:
  274. crypto_mod_put(alg);
  275. return inst;
  276. }
  277. static void xcbc_free(struct crypto_instance *inst)
  278. {
  279. crypto_drop_spawn(crypto_instance_ctx(inst));
  280. kfree(inst);
  281. }
/*
 * Template definition: "xcbc(cipher)" instances are constructed by
 * xcbc_alloc() and torn down by xcbc_free().
 */
static struct crypto_template crypto_xcbc_tmpl = {
	.name = "xcbc",
	.alloc = xcbc_alloc,
	.free = xcbc_free,
	.module = THIS_MODULE,
};
/* Register the "xcbc" template with the crypto core on module load. */
static int __init crypto_xcbc_module_init(void)
{
	return crypto_register_template(&crypto_xcbc_tmpl);
}
/* Unregister the "xcbc" template on module unload. */
static void __exit crypto_xcbc_module_exit(void)
{
	crypto_unregister_template(&crypto_xcbc_tmpl);
}
/* Module entry/exit points and metadata. */
module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");