ixp4xx_crypto.c

/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)

#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE		0x36
#define HMAC_OPAD_VALUE		0x5C
#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
	u16 buf_len;
	u16 pkt_len;
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
};
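/*
 * One 64-byte descriptor per operation, handed to the NPE by physical
 * address via the queue manager. The leading fields (mode, IV, buffer
 * addresses, offsets and lengths) are consumed by the NPE; the trailing
 * "Used by Host" words track the owning request and its bounce buffers.
 */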
struct crypt_ctl {
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	unsigned src_nents;
	unsigned dst_nents;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	unsigned short assoc_nents;
	unsigned short src_nents;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;
static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
	return;
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_32BIT_MASK,
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}
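/*
 * All crypt_ctl descriptors live in one coherent DMA array: the first
 * NPE_QLEN entries serve regular requests, the remaining entries up to
 * NPE_QLEN_TOTAL are reserved for configuration work (see
 * get_crypt_desc_emerg() below).
 */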
static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_KERNEL);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
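/*
 * Fallback allocator for one-off configuration descriptors (HMAC pad
 * hashing, reverse AES key generation): it first tries the normal ring,
 * then falls back to the reserved entries NPE_QLEN..NPE_QLEN_TOTAL-1.
 */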
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct buffer_desc *buf, u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}
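/*
 * Completion path: the NPE returns the physical address of the finished
 * crypt_ctl on RECV_QID; bit 0 of that address flags a failed operation,
 * which is reported to the caller as -EBADMSG.
 */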
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
				DMA_TO_DEVICE);
		dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
		dma_unmap_sg(dev, req->src, req_ctx->src_nents,
				DMA_BIDIRECTIONAL);

		free_buf_chain(req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
		int nents;

		if (req_ctx->dst) {
			nents = req_ctx->dst_nents;
			dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
			free_buf_chain(req_ctx->dst, crypt->dst_buf);
			src_direction = DMA_TO_DEVICE;
		}
		nents = req_ctx->src_nents;
		dma_unmap_sg(dev, req->src, nents, src_direction);
		free_buf_chain(req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
static int init_ixp_crypto(void)
{
	int ret = -ENODEV;

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
			IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		npe_load_firmware(npe_c, npe_name(npe_c), dev);
	}

	/* buffer_pool is also used to store the hmac at times,
	 * so make sure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
	return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}
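/*
 * HMAC setup is offloaded to the NPE: for each of the ipad/opad values the
 * key is XOR-padded to a full block and hashed with NPE_OP_HASH_GEN_ICV, so
 * the precomputed inner/outer chaining variables land directly at the given
 * target offset inside the per-direction NPE context.
 */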
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
			+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}
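/*
 * AES decryption on the NPE needs the reverse key schedule. It is generated
 * by the NPE itself: the decrypt context is temporarily marked CIPH_ENCR and
 * an NPE_OP_ENC_GEN_KEY operation writes the reverse key right after the
 * config word; the flag is cleared again on completion (CTL_FLAG_GEN_REVAES
 * in one_packet()).
 */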
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
		case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
		case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;

	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}
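/*
 * Translate a DMA-mapped scatterlist into the NPE's buffer_desc chain.
 * Physically contiguous scatterlist entries are merged into a single
 * descriptor; additional descriptors come from buffer_pool and are linked
 * via both virtual (next) and physical (phys_next) pointers.
 */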
static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
		unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
{
	int nents = 0;

	while (nbytes > 0) {
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		unsigned len = min(nbytes, sg_dma_len(sg));

		nents++;
		nbytes -= len;
		if (!buf->phys_addr) {
			buf->phys_addr = sg_dma_address(sg);
			buf->buf_len = len;
			buf->next = NULL;
			buf->phys_next = 0;
			goto next;
		}
		/* Two consecutive chunks on one page may be handled by the old
		 * buffer descriptor, increased by the length of the new one
		 */
		if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
			buf->buf_len += len;
			goto next;
		}
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf)
			return NULL;
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;

		buf = next_buf;
		buf->next = NULL;
		buf->phys_next = 0;
		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
next:
		if (nbytes > 0) {
			sg = sg_next(sg);
		}
	}
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}
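/*
 * Common path for the ablkcipher encrypt/decrypt calls: pick a crypt_ctl
 * descriptor, DMA-map the source (and, for out-of-place requests, the
 * destination) scatterlist, chain it into buffer_descs and push the
 * descriptor's physical address onto SEND_QID. The request completes
 * asynchronously from the RECV_QID tasklet (one_packet()).
 */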
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes, nents;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		nents = count_sg(req->dst, nbytes);
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(nents != 1);
		req_ctx->dst_nents = nents;
		dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
		req_ctx->dst = dma_pool_alloc(buffer_pool, flags, &crypt->dst_buf);
		if (!req_ctx->dst)
			goto unmap_sg_dest;
		req_ctx->dst->phys_addr = 0;
		if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
	} else {
		req_ctx->dst = NULL;
		req_ctx->dst_nents = 0;
	}
	nents = count_sg(req->src, nbytes);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, src_direction);

	req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->src)
		goto unmap_sg_src;
	req_ctx->src->phys_addr = 0;
	if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
		goto free_buf_src;

	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(req_ctx->src, crypt->src_buf);
unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(req_ctx->dst, crypt->dst_buf);
unmap_sg_dest:
		dma_unmap_sg(dev, req->dst, req_ctx->dst_nents,
				DMA_FROM_DEVICE);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}
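/*
 * AEAD path: associated data, IV and payload are chained into one
 * buffer_desc list and processed in a single pass (auth_len covers
 * assoc + IV + payload). If the ICV is not contiguous in the source
 * scatterlist it is bounced through hmac_virt and copied back in
 * finish_scattered_hmac(). Only in-place (src == dst) requests are handled.
 */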
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen, nents;
	struct buffer_desc *buf;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->buffer)
		goto out;
	req_ctx->buffer->phys_addr = 0;

	/* ASSOC data */
	nents = count_sg(req->assoc, req->assoclen);
	req_ctx->assoc_nents = nents;
	dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
	buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer, flags);
	if (!buf)
		goto unmap_sg_assoc;

	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
	if (!buf)
		goto unmap_sg_iv;

	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto unmap_sg_iv;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	/* Crypt */
	nents = count_sg(req->src, cryptlen + authsize);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
	if (!buf)
		goto unmap_sg_src;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
unmap_sg_iv:
	dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
unmap_sg_assoc:
	dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
	free_buf_chain(req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);

	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"
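/*
 * Register the table above with the crypto API: every entry gets the
 * "-ixp4xx" driver-name suffix, AES entries are skipped when AES support is
 * unavailable, and entries without a hash are set up as async block ciphers
 * while the authenc() entries become AEAD algorithms.
 */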
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");