picoxcell_crypto.c

  1. /*
  2. * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. */
  18. #include <crypto/aead.h>
  19. #include <crypto/aes.h>
  20. #include <crypto/algapi.h>
  21. #include <crypto/authenc.h>
  22. #include <crypto/des.h>
  23. #include <crypto/md5.h>
  24. #include <crypto/sha.h>
  25. #include <crypto/internal/skcipher.h>
  26. #include <linux/clk.h>
  27. #include <linux/crypto.h>
  28. #include <linux/delay.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/dmapool.h>
  31. #include <linux/err.h>
  32. #include <linux/init.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/io.h>
  35. #include <linux/list.h>
  36. #include <linux/module.h>
  37. #include <linux/platform_device.h>
  38. #include <linux/pm.h>
  39. #include <linux/rtnetlink.h>
  40. #include <linux/scatterlist.h>
  41. #include <linux/sched.h>
  42. #include <linux/slab.h>
  43. #include <linux/timer.h>
  44. #include "picoxcell_crypto_regs.h"
  45. /*
  46. * The threshold for the number of entries in the CMD FIFO available before
  47. * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
  48. * number of interrupts raised to the CPU.
  49. */
  50. #define CMD0_IRQ_THRESHOLD 1
  51. /*
  52. * The timeout period (in jiffies) for a PDU. When the number of PDUs in
  53. * flight is greater than the STAT_IRQ_THRESHOLD or is 0, the timer is disabled.
  54. * When there are packets in flight but lower than the threshold, we enable
  55. * the timer and at expiry, attempt to remove any processed packets from the
  56. * queue and if there are still packets left, schedule the timer again.
  57. */
  58. #define PACKET_TIMEOUT 1
  59. /* The priority to register each algorithm with. */
  60. #define SPACC_CRYPTO_ALG_PRIORITY 10000
  61. #define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
  62. #define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
  63. #define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
  64. #define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
  65. #define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
  66. #define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
  67. #define SPACC_CRYPTO_L2_HASH_PG_SZ 64
  68. #define SPACC_CRYPTO_L2_MAX_CTXS 128
  69. #define SPACC_CRYPTO_L2_FIFO_SZ 128
  70. #define MAX_DDT_LEN 16
  71. /* DDT format. This must match the hardware DDT format exactly. */
  72. struct spacc_ddt {
  73. dma_addr_t p;
  74. u32 len;
  75. };
  76. /*
  77. * Asynchronous crypto request structure.
  78. *
  79. * This structure defines a request that is either queued for processing or
  80. * being processed.
  81. */
  82. struct spacc_req {
  83. struct list_head list;
  84. struct spacc_engine *engine;
  85. struct crypto_async_request *req;
  86. int result;
  87. bool is_encrypt;
  88. unsigned ctx_id;
  89. dma_addr_t src_addr, dst_addr;
  90. struct spacc_ddt *src_ddt, *dst_ddt;
  91. void (*complete)(struct spacc_req *req);
  92. /* AEAD specific bits. */
  93. u8 *giv;
  94. size_t giv_len;
  95. dma_addr_t giv_pa;
  96. };
  97. struct spacc_engine {
  98. void __iomem *regs;
  99. struct list_head pending;
  100. int next_ctx;
  101. spinlock_t hw_lock;
  102. int in_flight;
  103. struct list_head completed;
  104. struct list_head in_progress;
  105. struct tasklet_struct complete;
  106. unsigned long fifo_sz;
  107. void __iomem *cipher_ctx_base;
  108. void __iomem *hash_key_base;
  109. struct spacc_alg *algs;
  110. unsigned num_algs;
  111. struct list_head registered_algs;
  112. size_t cipher_pg_sz;
  113. size_t hash_pg_sz;
  114. const char *name;
  115. struct clk *clk;
  116. struct device *dev;
  117. unsigned max_ctxs;
  118. struct timer_list packet_timeout;
  119. unsigned stat_irq_thresh;
  120. struct dma_pool *req_pool;
  121. };
  122. /* Algorithm type mask. */
  123. #define SPACC_CRYPTO_ALG_MASK 0x7
  124. /* SPACC definition of a crypto algorithm. */
  125. struct spacc_alg {
  126. unsigned long ctrl_default;
  127. unsigned long type;
  128. struct crypto_alg alg;
  129. struct spacc_engine *engine;
  130. struct list_head entry;
  131. int key_offs;
  132. int iv_offs;
  133. };
  134. /* Generic context structure for any algorithm type. */
  135. struct spacc_generic_ctx {
  136. struct spacc_engine *engine;
  137. int flags;
  138. int key_offs;
  139. int iv_offs;
  140. };
  141. /* Block cipher context. */
  142. struct spacc_ablk_ctx {
  143. struct spacc_generic_ctx generic;
  144. u8 key[AES_MAX_KEY_SIZE];
  145. u8 key_len;
  146. /*
  147. * The fallback cipher. If the operation can't be done in hardware,
  148. * fallback to a software version.
  149. */
  150. struct crypto_ablkcipher *sw_cipher;
  151. };
  152. /* AEAD cipher context. */
  153. struct spacc_aead_ctx {
  154. struct spacc_generic_ctx generic;
  155. u8 cipher_key[AES_MAX_KEY_SIZE];
  156. u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
  157. u8 cipher_key_len;
  158. u8 hash_key_len;
  159. struct crypto_aead *sw_cipher;
  160. size_t auth_size;
  161. u8 salt[AES_BLOCK_SIZE];
  162. };
  163. static int spacc_ablk_submit(struct spacc_req *req);
  164. static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
  165. {
  166. return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
  167. }
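/* Return true if the command FIFO has no free entries. */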
  168. static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
  169. {
  170. u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
  171. return fifo_stat & SPA_FIFO_CMD_FULL;
  172. }
  173. /*
  174. * Given a context and a context index, get the base address of the
  175. * context page.
  176. *
  177. * Returns the address of the context page where the key/context may
  178. * be written.
  179. */
  180. static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
  181. unsigned indx,
  182. bool is_cipher_ctx)
  183. {
  184. return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
  185. (indx * ctx->engine->cipher_pg_sz) :
  186. ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
  187. }
  188. /* The context pages can only be written with 32-bit accesses. */
  189. static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
  190. unsigned count)
  191. {
  192. const u32 *src32 = (const u32 *) src;
  193. while (count--)
  194. writel(*src32++, dst++);
  195. }
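/* Write the cipher key and IV into a context page as 32-bit words. */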
  196. static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
  197. void __iomem *page_addr, const u8 *key,
  198. size_t key_len, const u8 *iv, size_t iv_len)
  199. {
  200. void __iomem *key_ptr = page_addr + ctx->key_offs;
  201. void __iomem *iv_ptr = page_addr + ctx->iv_offs;
  202. memcpy_toio32(key_ptr, key, key_len / 4);
  203. memcpy_toio32(iv_ptr, iv, iv_len / 4);
  204. }
  205. /*
  206. * Load a context into the engine's context memory.
  207. *
  208. * Returns the index of the context page where the context was loaded.
  209. */
  210. static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
  211. const u8 *ciph_key, size_t ciph_len,
  212. const u8 *iv, size_t ivlen, const u8 *hash_key,
  213. size_t hash_len)
  214. {
  215. unsigned indx = ctx->engine->next_ctx++;
  216. void __iomem *ciph_page_addr, *hash_page_addr;
  217. ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
  218. hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
  219. ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
  220. spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
  221. ivlen);
  222. writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
  223. (1 << SPA_KEY_SZ_CIPHER_OFFSET),
  224. ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
  225. if (hash_key) {
  226. memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
  227. writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
  228. ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
  229. }
  230. return indx;
  231. }
  232. /* Count the number of scatterlist entries in a scatterlist. */
  233. static int sg_count(struct scatterlist *sg_list, int nbytes)
  234. {
  235. struct scatterlist *sg = sg_list;
  236. int sg_nents = 0;
  237. while (nbytes > 0) {
  238. ++sg_nents;
  239. nbytes -= sg->length;
  240. sg = sg_next(sg);
  241. }
  242. return sg_nents;
  243. }
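/* Fill a single DDT entry with the DMA address and length of a buffer. */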
  244. static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
  245. {
  246. ddt->p = phys;
  247. ddt->len = len;
  248. }
  249. /*
  250. * Take a crypto request and scatterlists for the data and turn them into DDTs
  251. * for passing to the crypto engines. This also DMA maps the data so that the
  252. * crypto engines can DMA to/from them.
  253. */
  254. static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
  255. struct scatterlist *payload,
  256. unsigned nbytes,
  257. enum dma_data_direction dir,
  258. dma_addr_t *ddt_phys)
  259. {
  260. unsigned nents, mapped_ents;
  261. struct scatterlist *cur;
  262. struct spacc_ddt *ddt;
  263. int i;
  264. nents = sg_count(payload, nbytes);
  265. mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
  266. if (mapped_ents + 1 > MAX_DDT_LEN)
  267. goto out;
  268. ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
  269. if (!ddt)
  270. goto out;
  271. for_each_sg(payload, cur, mapped_ents, i)
  272. ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
  273. ddt_set(&ddt[mapped_ents], 0, 0);
  274. return ddt;
  275. out:
  276. dma_unmap_sg(engine->dev, payload, nents, dir);
  277. return NULL;
  278. }
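/*
 * Build the source and destination DDTs for an AEAD request, DMA mapping the
 * associated data, the IV/GIV and the payload. Returns 0 on success or
 * -ENOMEM if the DDTs could not be allocated.
 */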
  279. static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
  280. {
  281. struct aead_request *areq = container_of(req->req, struct aead_request,
  282. base);
  283. struct spacc_engine *engine = req->engine;
  284. struct spacc_ddt *src_ddt, *dst_ddt;
  285. unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
  286. unsigned nents = sg_count(areq->src, areq->cryptlen);
  287. dma_addr_t iv_addr;
  288. struct scatterlist *cur;
  289. int i, dst_ents, src_ents, assoc_ents;
  290. u8 *iv = giv ? giv : areq->iv;
  291. src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
  292. if (!src_ddt)
  293. return -ENOMEM;
  294. dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
  295. if (!dst_ddt) {
  296. dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
  297. return -ENOMEM;
  298. }
  299. req->src_ddt = src_ddt;
  300. req->dst_ddt = dst_ddt;
  301. assoc_ents = dma_map_sg(engine->dev, areq->assoc,
  302. sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
  303. if (areq->src != areq->dst) {
  304. src_ents = dma_map_sg(engine->dev, areq->src, nents,
  305. DMA_TO_DEVICE);
  306. dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
  307. DMA_FROM_DEVICE);
  308. } else {
  309. src_ents = dma_map_sg(engine->dev, areq->src, nents,
  310. DMA_BIDIRECTIONAL);
  311. dst_ents = 0;
  312. }
  313. /*
  314. * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
  315. * formed by the crypto block and sent as the ESP IV for IPSEC.
  316. */
  317. iv_addr = dma_map_single(engine->dev, iv, ivsize,
  318. giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  319. req->giv_pa = iv_addr;
  320. /*
  321. * Add the associated data to the DDTs. For decryption we don't copy the
  322. * associated data into the destination.
  323. */
  324. for_each_sg(areq->assoc, cur, assoc_ents, i) {
  325. ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
  326. if (req->is_encrypt)
  327. ddt_set(dst_ddt++, sg_dma_address(cur),
  328. sg_dma_len(cur));
  329. }
  330. ddt_set(src_ddt++, iv_addr, ivsize);
  331. if (giv || req->is_encrypt)
  332. ddt_set(dst_ddt++, iv_addr, ivsize);
  333. /*
  334. * Now map in the payload for the source and destination and terminate
  335. * with the NULL pointers.
  336. */
  337. for_each_sg(areq->src, cur, src_ents, i) {
  338. ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
  339. if (areq->src == areq->dst)
  340. ddt_set(dst_ddt++, sg_dma_address(cur),
  341. sg_dma_len(cur));
  342. }
  343. for_each_sg(areq->dst, cur, dst_ents, i)
  344. ddt_set(dst_ddt++, sg_dma_address(cur),
  345. sg_dma_len(cur));
  346. ddt_set(src_ddt, 0, 0);
  347. ddt_set(dst_ddt, 0, 0);
  348. return 0;
  349. }
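/* Undo the DMA mappings made by spacc_aead_make_ddts() and free the DDTs. */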
  350. static void spacc_aead_free_ddts(struct spacc_req *req)
  351. {
  352. struct aead_request *areq = container_of(req->req, struct aead_request,
  353. base);
  354. struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
  355. struct spacc_aead_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
  356. struct spacc_engine *engine = aead_ctx->generic.engine;
  357. unsigned ivsize = alg->alg.cra_aead.ivsize;
  358. unsigned nents = sg_count(areq->src, areq->cryptlen);
  359. if (areq->src != areq->dst) {
  360. dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
  361. dma_unmap_sg(engine->dev, areq->dst,
  362. sg_count(areq->dst, areq->cryptlen),
  363. DMA_FROM_DEVICE);
  364. } else
  365. dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
  366. dma_unmap_sg(engine->dev, areq->assoc,
  367. sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
  368. dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
  369. dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
  370. dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
  371. }
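/* Unmap a single payload scatterlist and return its DDT to the pool. */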
  372. static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
  373. dma_addr_t ddt_addr, struct scatterlist *payload,
  374. unsigned nbytes, enum dma_data_direction dir)
  375. {
  376. unsigned nents = sg_count(payload, nbytes);
  377. dma_unmap_sg(req->engine->dev, payload, nents, dir);
  378. dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
  379. }
  380. /*
  381. * Set key for a DES operation in an AEAD cipher. This also performs weak key
  382. * checking if required.
  383. */
  384. static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
  385. unsigned int len)
  386. {
  387. struct crypto_tfm *tfm = crypto_aead_tfm(aead);
  388. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  389. u32 tmp[DES_EXPKEY_WORDS];
  390. if (unlikely(!des_ekey(tmp, key)) &&
  391. (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
  392. tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
  393. return -EINVAL;
  394. }
  395. memcpy(ctx->cipher_key, key, len);
  396. ctx->cipher_key_len = len;
  397. return 0;
  398. }
  399. /* Set the key for the AES block cipher component of the AEAD transform. */
  400. static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
  401. unsigned int len)
  402. {
  403. struct crypto_tfm *tfm = crypto_aead_tfm(aead);
  404. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  405. /*
  406. * IPSec engine only supports 128 and 256 bit AES keys. If we get a
  407. * request for any other size (192 bits) then we need to do a software
  408. * fallback.
  409. */
  410. if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
  411. /*
  412. * Set the fallback transform to use the same request flags as
  413. * the hardware transform.
  414. */
  415. ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  416. ctx->sw_cipher->base.crt_flags |=
  417. tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
  418. return crypto_aead_setkey(ctx->sw_cipher, key, len);
  419. }
  420. memcpy(ctx->cipher_key, key, len);
  421. ctx->cipher_key_len = len;
  422. return 0;
  423. }
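/*
 * Set the key for an authenc() AEAD transform. The key blob carries an
 * rtattr header with the cipher key length followed by the authentication
 * and cipher keys.
 */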
  424. static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
  425. unsigned int keylen)
  426. {
  427. struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  428. struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
  429. struct rtattr *rta = (void *)key;
  430. struct crypto_authenc_key_param *param;
  431. unsigned int authkeylen, enckeylen;
  432. int err = -EINVAL;
  433. if (!RTA_OK(rta, keylen))
  434. goto badkey;
  435. if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
  436. goto badkey;
  437. if (RTA_PAYLOAD(rta) < sizeof(*param))
  438. goto badkey;
  439. param = RTA_DATA(rta);
  440. enckeylen = be32_to_cpu(param->enckeylen);
  441. key += RTA_ALIGN(rta->rta_len);
  442. keylen -= RTA_ALIGN(rta->rta_len);
  443. if (keylen < enckeylen)
  444. goto badkey;
  445. authkeylen = keylen - enckeylen;
  446. if (enckeylen > AES_MAX_KEY_SIZE)
  447. goto badkey;
  448. if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
  449. SPA_CTRL_CIPH_ALG_AES)
  450. err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
  451. else
  452. err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
  453. if (err)
  454. goto badkey;
  455. memcpy(ctx->hash_ctx, key, authkeylen);
  456. ctx->hash_key_len = authkeylen;
  457. return 0;
  458. badkey:
  459. crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  460. return -EINVAL;
  461. }
  462. static int spacc_aead_setauthsize(struct crypto_aead *tfm,
  463. unsigned int authsize)
  464. {
  465. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
  466. ctx->auth_size = authsize;
  467. return 0;
  468. }
  469. /*
  470. * Check if an AEAD request requires a fallback operation. Some requests can't
  471. * be completed in hardware because the hardware may not support certain key
  472. * sizes. In these cases we need to complete the request in software.
  473. */
  474. static int spacc_aead_need_fallback(struct spacc_req *req)
  475. {
  476. struct aead_request *aead_req;
  477. struct crypto_tfm *tfm = req->req->tfm;
  478. struct crypto_alg *alg = req->req->tfm->__crt_alg;
  479. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  480. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  481. aead_req = container_of(req->req, struct aead_request, base);
  482. /*
  483. * If we have an unsupported key length, then we need to do a
  484. * software fallback.
  485. */
  486. if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
  487. SPA_CTRL_CIPH_ALG_AES &&
  488. ctx->cipher_key_len != AES_KEYSIZE_128 &&
  489. ctx->cipher_key_len != AES_KEYSIZE_256)
  490. return 1;
  491. return 0;
  492. }
  493. static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
  494. bool is_encrypt)
  495. {
  496. struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
  497. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
  498. int err;
  499. if (ctx->sw_cipher) {
  500. /*
  501. * Change the request to use the software fallback transform,
  502. * and once the ciphering has completed, put the old transform
  503. * back into the request.
  504. */
  505. aead_request_set_tfm(req, ctx->sw_cipher);
  506. err = is_encrypt ? crypto_aead_encrypt(req) :
  507. crypto_aead_decrypt(req);
  508. aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
  509. } else
  510. err = -EINVAL;
  511. return err;
  512. }
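/* Completion handler for AEAD requests: free the DDTs and notify the caller. */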
  513. static void spacc_aead_complete(struct spacc_req *req)
  514. {
  515. spacc_aead_free_ddts(req);
  516. req->req->complete(req->req, req->result);
  517. }
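/*
 * Load the AEAD context into the engine and program the DDT pointers and
 * lengths to start processing the request. Returns -EINPROGRESS.
 */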
  518. static int spacc_aead_submit(struct spacc_req *req)
  519. {
  520. struct crypto_tfm *tfm = req->req->tfm;
  521. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  522. struct crypto_alg *alg = req->req->tfm->__crt_alg;
  523. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  524. struct spacc_engine *engine = ctx->generic.engine;
  525. u32 ctrl, proc_len, assoc_len;
  526. struct aead_request *aead_req =
  527. container_of(req->req, struct aead_request, base);
  528. req->result = -EINPROGRESS;
  529. req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
  530. ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
  531. ctx->hash_ctx, ctx->hash_key_len);
  532. /* Set the source and destination DDT pointers. */
  533. writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
  534. writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
  535. writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
  536. assoc_len = aead_req->assoclen;
  537. proc_len = aead_req->cryptlen + assoc_len;
  538. /*
  539. * If we aren't generating an IV, then we need to include the IV in the
  540. * associated data so that it is included in the hash.
  541. */
  542. if (!req->giv) {
  543. assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
  544. proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
  545. } else
  546. proc_len += req->giv_len;
  547. /*
  548. * If we are decrypting, we need to take the length of the ICV out of
  549. * the processing length.
  550. */
  551. if (!req->is_encrypt)
  552. proc_len -= ctx->auth_size;
  553. writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
  554. writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
  555. writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
  556. writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
  557. writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
  558. ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
  559. (1 << SPA_CTRL_ICV_APPEND);
  560. if (req->is_encrypt)
  561. ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
  562. else
  563. ctrl |= (1 << SPA_CTRL_KEY_EXP);
  564. mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
  565. writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
  566. return -EINPROGRESS;
  567. }
  568. static int spacc_req_submit(struct spacc_req *req);
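/*
 * Push as many pending requests into the engine as it has room for. Called
 * with the engine's hw_lock held.
 */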
  569. static void spacc_push(struct spacc_engine *engine)
  570. {
  571. struct spacc_req *req;
  572. while (!list_empty(&engine->pending) &&
  573. engine->in_flight + 1 <= engine->fifo_sz) {
  574. ++engine->in_flight;
  575. req = list_first_entry(&engine->pending, struct spacc_req,
  576. list);
  577. list_move_tail(&req->list, &engine->in_progress);
  578. req->result = spacc_req_submit(req);
  579. }
  580. }
  581. /*
  582. * Setup an AEAD request for processing. This will configure the engine, load
  583. * the context and then start the packet processing.
  584. *
  585. * @giv Pointer to destination address for a generated IV. If the
  586. * request does not need to generate an IV then this should be set to NULL.
  587. */
  588. static int spacc_aead_setup(struct aead_request *req, u8 *giv,
  589. unsigned alg_type, bool is_encrypt)
  590. {
  591. struct crypto_alg *alg = req->base.tfm->__crt_alg;
  592. struct spacc_engine *engine = to_spacc_alg(alg)->engine;
  593. struct spacc_req *dev_req = aead_request_ctx(req);
  594. int err = -EINPROGRESS;
  595. unsigned long flags;
  596. unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
  597. dev_req->giv = giv;
  598. dev_req->giv_len = ivsize;
  599. dev_req->req = &req->base;
  600. dev_req->is_encrypt = is_encrypt;
  601. dev_req->result = -EBUSY;
  602. dev_req->engine = engine;
  603. dev_req->complete = spacc_aead_complete;
  604. if (unlikely(spacc_aead_need_fallback(dev_req)))
  605. return spacc_aead_do_fallback(req, alg_type, is_encrypt);
  606. spacc_aead_make_ddts(dev_req, dev_req->giv);
  607. err = -EINPROGRESS;
  608. spin_lock_irqsave(&engine->hw_lock, flags);
  609. if (unlikely(spacc_fifo_cmd_full(engine)) ||
  610. engine->in_flight + 1 > engine->fifo_sz) {
  611. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  612. err = -EBUSY;
  613. spin_unlock_irqrestore(&engine->hw_lock, flags);
  614. goto out_free_ddts;
  615. }
  616. list_add_tail(&dev_req->list, &engine->pending);
  617. } else {
  618. list_add_tail(&dev_req->list, &engine->pending);
  619. spacc_push(engine);
  620. }
  621. spin_unlock_irqrestore(&engine->hw_lock, flags);
  622. goto out;
  623. out_free_ddts:
  624. spacc_aead_free_ddts(dev_req);
  625. out:
  626. return err;
  627. }
  628. static int spacc_aead_encrypt(struct aead_request *req)
  629. {
  630. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  631. struct crypto_tfm *tfm = crypto_aead_tfm(aead);
  632. struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
  633. return spacc_aead_setup(req, NULL, alg->type, 1);
  634. }
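/*
 * givencrypt entry point: build the IV from the per-context salt and the
 * request sequence number, then encrypt.
 */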
  635. static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
  636. {
  637. struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
  638. struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  639. size_t ivsize = crypto_aead_ivsize(tfm);
  640. struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
  641. unsigned len;
  642. __be64 seq;
  643. memcpy(req->areq.iv, ctx->salt, ivsize);
  644. len = ivsize;
  645. if (ivsize > sizeof(u64)) {
  646. memset(req->giv, 0, ivsize - sizeof(u64));
  647. len = sizeof(u64);
  648. }
  649. seq = cpu_to_be64(req->seq);
  650. memcpy(req->giv + ivsize - len, &seq, len);
  651. return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
  652. }
  653. static int spacc_aead_decrypt(struct aead_request *req)
  654. {
  655. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  656. struct crypto_tfm *tfm = crypto_aead_tfm(aead);
  657. struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
  658. return spacc_aead_setup(req, NULL, alg->type, 0);
  659. }
  660. /*
  661. * Initialise a new AEAD context. This is responsible for allocating the
  662. * fallback cipher and initialising the context.
  663. */
  664. static int spacc_aead_cra_init(struct crypto_tfm *tfm)
  665. {
  666. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  667. struct crypto_alg *alg = tfm->__crt_alg;
  668. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  669. struct spacc_engine *engine = spacc_alg->engine;
  670. ctx->generic.flags = spacc_alg->type;
  671. ctx->generic.engine = engine;
  672. ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
  673. CRYPTO_ALG_ASYNC |
  674. CRYPTO_ALG_NEED_FALLBACK);
  675. if (IS_ERR(ctx->sw_cipher)) {
  676. dev_warn(engine->dev, "failed to allocate fallback for %s\n",
  677. alg->cra_name);
  678. ctx->sw_cipher = NULL;
  679. }
  680. ctx->generic.key_offs = spacc_alg->key_offs;
  681. ctx->generic.iv_offs = spacc_alg->iv_offs;
  682. get_random_bytes(ctx->salt, sizeof(ctx->salt));
  683. tfm->crt_aead.reqsize = sizeof(struct spacc_req);
  684. return 0;
  685. }
  686. /*
  687. * Destructor for an AEAD context. This is called when the transform is freed
  688. * and must free the fallback cipher.
  689. */
  690. static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
  691. {
  692. struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
  693. if (ctx->sw_cipher)
  694. crypto_free_aead(ctx->sw_cipher);
  695. ctx->sw_cipher = NULL;
  696. }
  697. /*
  698. * Set the DES key for a block cipher transform. This also performs weak key
  699. * checking if the transform has requested it.
  700. */
  701. static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  702. unsigned int len)
  703. {
  704. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  705. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  706. u32 tmp[DES_EXPKEY_WORDS];
  707. if (len > DES3_EDE_KEY_SIZE) {
  708. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  709. return -EINVAL;
  710. }
  711. if (unlikely(!des_ekey(tmp, key)) &&
  712. (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
  713. tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
  714. return -EINVAL;
  715. }
  716. memcpy(ctx->key, key, len);
  717. ctx->key_len = len;
  718. return 0;
  719. }
  720. /*
  721. * Set the key for an AES block cipher. Some key lengths are not supported in
  722. * hardware so this must also check whether a fallback is needed.
  723. */
  724. static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  725. unsigned int len)
  726. {
  727. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  728. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  729. int err = 0;
  730. if (len > AES_MAX_KEY_SIZE) {
  731. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  732. return -EINVAL;
  733. }
  734. /*
  735. * IPSec engine only supports 128 and 256 bit AES keys. If we get a
  736. * request for any other size (192 bits) then we need to do a software
  737. * fallback.
  738. */
  739. if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
  740. ctx->sw_cipher) {
  741. /*
  742. * Set the fallback transform to use the same request flags as
  743. * the hardware transform.
  744. */
  745. ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  746. ctx->sw_cipher->base.crt_flags |=
  747. cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
  748. err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
  749. if (err)
  750. goto sw_setkey_failed;
  751. } else if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
  752. !ctx->sw_cipher)
  753. err = -EINVAL;
  754. memcpy(ctx->key, key, len);
  755. ctx->key_len = len;
  756. sw_setkey_failed:
  757. if (err && ctx->sw_cipher) {
  758. tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  759. tfm->crt_flags |=
  760. ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
  761. }
  762. return err;
  763. }
  764. static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
  765. const u8 *key, unsigned int len)
  766. {
  767. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  768. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  769. int err = 0;
  770. if (len > AES_MAX_KEY_SIZE) {
  771. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  772. err = -EINVAL;
  773. goto out;
  774. }
  775. memcpy(ctx->key, key, len);
  776. ctx->key_len = len;
  777. out:
  778. return err;
  779. }
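/*
 * AES requests must fall back to software when the key is neither 128 nor
 * 256 bits long, as the hardware does not support 192-bit keys.
 */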
  780. static int spacc_ablk_need_fallback(struct spacc_req *req)
  781. {
  782. struct spacc_ablk_ctx *ctx;
  783. struct crypto_tfm *tfm = req->req->tfm;
  784. struct crypto_alg *alg = req->req->tfm->__crt_alg;
  785. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  786. ctx = crypto_tfm_ctx(tfm);
  787. return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
  788. SPA_CTRL_CIPH_ALG_AES &&
  789. ctx->key_len != AES_KEYSIZE_128 &&
  790. ctx->key_len != AES_KEYSIZE_256;
  791. }
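/* Completion handler for block cipher requests: free the DDTs and complete the request. */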
  792. static void spacc_ablk_complete(struct spacc_req *req)
  793. {
  794. struct ablkcipher_request *ablk_req =
  795. container_of(req->req, struct ablkcipher_request, base);
  796. if (ablk_req->src != ablk_req->dst) {
  797. spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
  798. ablk_req->nbytes, DMA_TO_DEVICE);
  799. spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
  800. ablk_req->nbytes, DMA_FROM_DEVICE);
  801. } else
  802. spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
  803. ablk_req->nbytes, DMA_BIDIRECTIONAL);
  804. req->req->complete(req->req, req->result);
  805. }
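/*
 * Program the engine with the context, DDT pointers and processing length
 * for a block cipher request and start processing. Returns -EINPROGRESS.
 */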
  806. static int spacc_ablk_submit(struct spacc_req *req)
  807. {
  808. struct crypto_tfm *tfm = req->req->tfm;
  809. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  810. struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
  811. struct crypto_alg *alg = req->req->tfm->__crt_alg;
  812. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  813. struct spacc_engine *engine = ctx->generic.engine;
  814. u32 ctrl;
  815. req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
  816. ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
  817. NULL, 0);
  818. writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
  819. writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
  820. writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
  821. writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
  822. writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
  823. writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
  824. writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
  825. ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
  826. (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
  827. (1 << SPA_CTRL_KEY_EXP));
  828. mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
  829. writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
  830. return -EINPROGRESS;
  831. }
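/* Run a block cipher request through the software fallback transform. */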
  832. static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
  833. unsigned alg_type, bool is_encrypt)
  834. {
  835. struct crypto_tfm *old_tfm =
  836. crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
  837. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
  838. int err;
  839. if (!ctx->sw_cipher)
  840. return -EINVAL;
  841. /*
  842. * Change the request to use the software fallback transform, and once
  843. * the ciphering has completed, put the old transform back into the
  844. * request.
  845. */
  846. ablkcipher_request_set_tfm(req, ctx->sw_cipher);
  847. err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
  848. crypto_ablkcipher_decrypt(req);
  849. ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
  850. return err;
  851. }
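/*
 * Prepare a block cipher request: build the DDTs and either queue the
 * request or submit it to the engine if there is space in the FIFO.
 */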
  852. static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
  853. bool is_encrypt)
  854. {
  855. struct crypto_alg *alg = req->base.tfm->__crt_alg;
  856. struct spacc_engine *engine = to_spacc_alg(alg)->engine;
  857. struct spacc_req *dev_req = ablkcipher_request_ctx(req);
  858. unsigned long flags;
  859. int err = -ENOMEM;
  860. dev_req->req = &req->base;
  861. dev_req->is_encrypt = is_encrypt;
  862. dev_req->engine = engine;
  863. dev_req->complete = spacc_ablk_complete;
  864. dev_req->result = -EINPROGRESS;
  865. if (unlikely(spacc_ablk_need_fallback(dev_req)))
  866. return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
  867. /*
  868. * Create the DDTs for the engine. If we share the same source and
  869. * destination then we can optimize by reusing the DDTs.
  870. */
  871. if (req->src != req->dst) {
  872. dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
  873. req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
  874. if (!dev_req->src_ddt)
  875. goto out;
  876. dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
  877. req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
  878. if (!dev_req->dst_ddt)
  879. goto out_free_src;
  880. } else {
  881. dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
  882. req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
  883. if (!dev_req->dst_ddt)
  884. goto out;
  885. dev_req->src_ddt = NULL;
  886. dev_req->src_addr = dev_req->dst_addr;
  887. }
  888. err = -EINPROGRESS;
  889. spin_lock_irqsave(&engine->hw_lock, flags);
  890. /*
  891. * Check if the engine will accept the operation now. If it won't then
  892. * we either stick it on the end of a pending list if we can backlog,
  893. * or bail out with an error if not.
  894. */
  895. if (unlikely(spacc_fifo_cmd_full(engine)) ||
  896. engine->in_flight + 1 > engine->fifo_sz) {
  897. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
  898. err = -EBUSY;
  899. spin_unlock_irqrestore(&engine->hw_lock, flags);
  900. goto out_free_ddts;
  901. }
  902. list_add_tail(&dev_req->list, &engine->pending);
  903. } else {
  904. list_add_tail(&dev_req->list, &engine->pending);
  905. spacc_push(engine);
  906. }
  907. spin_unlock_irqrestore(&engine->hw_lock, flags);
  908. goto out;
  909. out_free_ddts:
  910. spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
  911. req->nbytes, req->src == req->dst ?
  912. DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
  913. out_free_src:
  914. if (req->src != req->dst)
  915. spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
  916. req->src, req->nbytes, DMA_TO_DEVICE);
  917. out:
  918. return err;
  919. }
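/*
 * Initialise a block cipher context, allocating a software fallback cipher
 * for transforms that may need one.
 */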
  920. static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
  921. {
  922. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  923. struct crypto_alg *alg = tfm->__crt_alg;
  924. struct spacc_alg *spacc_alg = to_spacc_alg(alg);
  925. struct spacc_engine *engine = spacc_alg->engine;
  926. ctx->generic.flags = spacc_alg->type;
  927. ctx->generic.engine = engine;
  928. if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
  929. ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
  930. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
  931. if (IS_ERR(ctx->sw_cipher)) {
  932. dev_warn(engine->dev, "failed to allocate fallback for %s\n",
  933. alg->cra_name);
  934. ctx->sw_cipher = NULL;
  935. }
  936. }
  937. ctx->generic.key_offs = spacc_alg->key_offs;
  938. ctx->generic.iv_offs = spacc_alg->iv_offs;
  939. tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
  940. return 0;
  941. }
  942. static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
  943. {
  944. struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
  945. if (ctx->sw_cipher)
  946. crypto_free_ablkcipher(ctx->sw_cipher);
  947. ctx->sw_cipher = NULL;
  948. }
  949. static int spacc_ablk_encrypt(struct ablkcipher_request *req)
  950. {
  951. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
  952. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  953. struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
  954. return spacc_ablk_setup(req, alg->type, 1);
  955. }
  956. static int spacc_ablk_decrypt(struct ablkcipher_request *req)
  957. {
  958. struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
  959. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  960. struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
  961. return spacc_ablk_setup(req, alg->type, 0);
  962. }
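/* Return true when the status FIFO contains no completed packets. */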
  963. static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
  964. {
  965. return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
  966. SPA_FIFO_STAT_EMPTY;
  967. }
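/*
 * Drain the status FIFO: move finished requests onto the completed list,
 * convert the hardware result codes into errno values and schedule the
 * completion tasklet.
 */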
  968. static void spacc_process_done(struct spacc_engine *engine)
  969. {
  970. struct spacc_req *req;
  971. unsigned long flags;
  972. spin_lock_irqsave(&engine->hw_lock, flags);
  973. while (!spacc_fifo_stat_empty(engine)) {
  974. req = list_first_entry(&engine->in_progress, struct spacc_req,
  975. list);
  976. list_move_tail(&req->list, &engine->completed);
  977. --engine->in_flight;
  978. /* POP the status register. */
  979. writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
  980. req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
  981. SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
  982. /*
  983. * Convert the SPAcc error status into the standard POSIX error
  984. * codes.
  985. */
  986. if (unlikely(req->result)) {
  987. switch (req->result) {
  988. case SPA_STATUS_ICV_FAIL:
  989. req->result = -EBADMSG;
  990. break;
  991. case SPA_STATUS_MEMORY_ERROR:
  992. dev_warn(engine->dev,
  993. "memory error triggered\n");
  994. req->result = -EFAULT;
  995. break;
  996. case SPA_STATUS_BLOCK_ERROR:
  997. dev_warn(engine->dev,
  998. "block error triggered\n");
  999. req->result = -EIO;
  1000. break;
  1001. }
  1002. }
  1003. }
  1004. tasklet_schedule(&engine->complete);
  1005. spin_unlock_irqrestore(&engine->hw_lock, flags);
  1006. }
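/*
 * Interrupt handler: acknowledge the pending interrupts and reap any
 * completed packets.
 */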
  1007. static irqreturn_t spacc_spacc_irq(int irq, void *dev)
  1008. {
  1009. struct spacc_engine *engine = (struct spacc_engine *)dev;
  1010. u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
  1011. writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
  1012. spacc_process_done(engine);
  1013. return IRQ_HANDLED;
  1014. }
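/*
 * Packet timer expiry: reap packets that completed while the number in
 * flight was below the STAT_IRQ_THRESHOLD.
 */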
  1015. static void spacc_packet_timeout(unsigned long data)
  1016. {
  1017. struct spacc_engine *engine = (struct spacc_engine *)data;
  1018. spacc_process_done(engine);
  1019. }
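/* Dispatch a queued request to the AEAD or block cipher submit path. */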
  1020. static int spacc_req_submit(struct spacc_req *req)
  1021. {
  1022. struct crypto_alg *alg = req->req->tfm->__crt_alg;
  1023. if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
  1024. return spacc_aead_submit(req);
  1025. else
  1026. return spacc_ablk_submit(req);
  1027. }
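/*
 * Completion tasklet: refill the engine from the pending list, rearm the
 * packet timer if requests are still in flight and run the completion
 * callbacks outside of the hw_lock.
 */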
  1028. static void spacc_spacc_complete(unsigned long data)
  1029. {
  1030. struct spacc_engine *engine = (struct spacc_engine *)data;
  1031. struct spacc_req *req, *tmp;
  1032. unsigned long flags;
  1033. LIST_HEAD(completed);
  1034. spin_lock_irqsave(&engine->hw_lock, flags);
  1035. list_splice_init(&engine->completed, &completed);
  1036. spacc_push(engine);
  1037. if (engine->in_flight)
  1038. mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
  1039. spin_unlock_irqrestore(&engine->hw_lock, flags);
  1040. list_for_each_entry_safe(req, tmp, &completed, list) {
  1041. req->complete(req);
  1042. list_del(&req->list);
  1043. }
  1044. }
  1045. #ifdef CONFIG_PM
  1046. static int spacc_suspend(struct device *dev)
  1047. {
  1048. struct platform_device *pdev = to_platform_device(dev);
  1049. struct spacc_engine *engine = platform_get_drvdata(pdev);
  1050. /*
  1051. * We only support standby mode. All we have to do is gate the clock to
  1052. * the spacc. The hardware will preserve state until we turn it back
  1053. * on again.
  1054. */
  1055. clk_disable(engine->clk);
  1056. return 0;
  1057. }
  1058. static int spacc_resume(struct device *dev)
  1059. {
  1060. struct platform_device *pdev = to_platform_device(dev);
  1061. struct spacc_engine *engine = platform_get_drvdata(pdev);
  1062. return clk_enable(engine->clk);
  1063. }
  1064. static const struct dev_pm_ops spacc_pm_ops = {
  1065. .suspend = spacc_suspend,
  1066. .resume = spacc_resume,
  1067. };
  1068. #endif /* CONFIG_PM */
  1069. static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
  1070. {
  1071. return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
  1072. }
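/*
 * stat_irq_thresh sysfs attribute: allows the STAT_CNT interrupt threshold
 * to be read and tuned at runtime.
 */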
  1073. static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
  1074. struct device_attribute *attr,
  1075. char *buf)
  1076. {
  1077. struct spacc_engine *engine = spacc_dev_to_engine(dev);
  1078. return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
  1079. }
  1080. static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
  1081. struct device_attribute *attr,
  1082. const char *buf, size_t len)
  1083. {
  1084. struct spacc_engine *engine = spacc_dev_to_engine(dev);
  1085. unsigned long thresh;
  1086. if (strict_strtoul(buf, 0, &thresh))
  1087. return -EINVAL;
  1088. thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
  1089. engine->stat_irq_thresh = thresh;
  1090. writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
  1091. engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
  1092. return len;
  1093. }
  1094. static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
  1095. spacc_stat_irq_thresh_store);
  1096. static struct spacc_alg ipsec_engine_algs[] = {
  1097. {
  1098. .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
  1099. .key_offs = 0,
  1100. .iv_offs = AES_MAX_KEY_SIZE,
  1101. .alg = {
  1102. .cra_name = "cbc(aes)",
  1103. .cra_driver_name = "cbc-aes-picoxcell",
  1104. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1105. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1106. CRYPTO_ALG_ASYNC |
  1107. CRYPTO_ALG_NEED_FALLBACK,
  1108. .cra_blocksize = AES_BLOCK_SIZE,
  1109. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1110. .cra_type = &crypto_ablkcipher_type,
  1111. .cra_module = THIS_MODULE,
  1112. .cra_ablkcipher = {
  1113. .setkey = spacc_aes_setkey,
  1114. .encrypt = spacc_ablk_encrypt,
  1115. .decrypt = spacc_ablk_decrypt,
  1116. .min_keysize = AES_MIN_KEY_SIZE,
  1117. .max_keysize = AES_MAX_KEY_SIZE,
  1118. .ivsize = AES_BLOCK_SIZE,
  1119. },
  1120. .cra_init = spacc_ablk_cra_init,
  1121. .cra_exit = spacc_ablk_cra_exit,
  1122. },
  1123. },
  1124. {
  1125. .key_offs = 0,
  1126. .iv_offs = AES_MAX_KEY_SIZE,
  1127. .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
  1128. .alg = {
  1129. .cra_name = "ecb(aes)",
  1130. .cra_driver_name = "ecb-aes-picoxcell",
  1131. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1132. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  1133. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  1134. .cra_blocksize = AES_BLOCK_SIZE,
  1135. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1136. .cra_type = &crypto_ablkcipher_type,
  1137. .cra_module = THIS_MODULE,
  1138. .cra_ablkcipher = {
  1139. .setkey = spacc_aes_setkey,
  1140. .encrypt = spacc_ablk_encrypt,
  1141. .decrypt = spacc_ablk_decrypt,
  1142. .min_keysize = AES_MIN_KEY_SIZE,
  1143. .max_keysize = AES_MAX_KEY_SIZE,
  1144. },
  1145. .cra_init = spacc_ablk_cra_init,
  1146. .cra_exit = spacc_ablk_cra_exit,
  1147. },
  1148. },
  1149. {
  1150. .key_offs = DES_BLOCK_SIZE,
  1151. .iv_offs = 0,
  1152. .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
  1153. .alg = {
  1154. .cra_name = "cbc(des)",
  1155. .cra_driver_name = "cbc-des-picoxcell",
  1156. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1157. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
  1158. .cra_blocksize = DES_BLOCK_SIZE,
  1159. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1160. .cra_type = &crypto_ablkcipher_type,
  1161. .cra_module = THIS_MODULE,
  1162. .cra_ablkcipher = {
  1163. .setkey = spacc_des_setkey,
  1164. .encrypt = spacc_ablk_encrypt,
  1165. .decrypt = spacc_ablk_decrypt,
  1166. .min_keysize = DES_KEY_SIZE,
  1167. .max_keysize = DES_KEY_SIZE,
  1168. .ivsize = DES_BLOCK_SIZE,
  1169. },
  1170. .cra_init = spacc_ablk_cra_init,
  1171. .cra_exit = spacc_ablk_cra_exit,
  1172. },
  1173. },
  1174. {
  1175. .key_offs = DES_BLOCK_SIZE,
  1176. .iv_offs = 0,
  1177. .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
  1178. .alg = {
  1179. .cra_name = "ecb(des)",
  1180. .cra_driver_name = "ecb-des-picoxcell",
  1181. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1182. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
  1183. .cra_blocksize = DES_BLOCK_SIZE,
  1184. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1185. .cra_type = &crypto_ablkcipher_type,
  1186. .cra_module = THIS_MODULE,
  1187. .cra_ablkcipher = {
  1188. .setkey = spacc_des_setkey,
  1189. .encrypt = spacc_ablk_encrypt,
  1190. .decrypt = spacc_ablk_decrypt,
  1191. .min_keysize = DES_KEY_SIZE,
  1192. .max_keysize = DES_KEY_SIZE,
  1193. },
  1194. .cra_init = spacc_ablk_cra_init,
  1195. .cra_exit = spacc_ablk_cra_exit,
  1196. },
  1197. },
  1198. {
  1199. .key_offs = DES_BLOCK_SIZE,
  1200. .iv_offs = 0,
  1201. .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
  1202. .alg = {
  1203. .cra_name = "cbc(des3_ede)",
  1204. .cra_driver_name = "cbc-des3-ede-picoxcell",
  1205. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1206. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
  1207. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1208. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1209. .cra_type = &crypto_ablkcipher_type,
  1210. .cra_module = THIS_MODULE,
  1211. .cra_ablkcipher = {
  1212. .setkey = spacc_des_setkey,
  1213. .encrypt = spacc_ablk_encrypt,
  1214. .decrypt = spacc_ablk_decrypt,
  1215. .min_keysize = DES3_EDE_KEY_SIZE,
  1216. .max_keysize = DES3_EDE_KEY_SIZE,
  1217. .ivsize = DES3_EDE_BLOCK_SIZE,
  1218. },
  1219. .cra_init = spacc_ablk_cra_init,
  1220. .cra_exit = spacc_ablk_cra_exit,
  1221. },
  1222. },
  1223. {
  1224. .key_offs = DES_BLOCK_SIZE,
  1225. .iv_offs = 0,
  1226. .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
  1227. .alg = {
  1228. .cra_name = "ecb(des3_ede)",
  1229. .cra_driver_name = "ecb-des3-ede-picoxcell",
  1230. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1231. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
  1232. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  1233. .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
  1234. .cra_type = &crypto_ablkcipher_type,
  1235. .cra_module = THIS_MODULE,
  1236. .cra_ablkcipher = {
  1237. .setkey = spacc_des_setkey,
  1238. .encrypt = spacc_ablk_encrypt,
  1239. .decrypt = spacc_ablk_decrypt,
  1240. .min_keysize = DES3_EDE_KEY_SIZE,
  1241. .max_keysize = DES3_EDE_KEY_SIZE,
  1242. },
  1243. .cra_init = spacc_ablk_cra_init,
  1244. .cra_exit = spacc_ablk_cra_exit,
  1245. },
  1246. },
  1247. {
  1248. .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
  1249. SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
  1250. .key_offs = 0,
  1251. .iv_offs = AES_MAX_KEY_SIZE,
  1252. .alg = {
  1253. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1254. .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
  1255. .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
  1256. .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
  1257. .cra_blocksize = AES_BLOCK_SIZE,
  1258. .cra_ctxsize = sizeof(struct spacc_aead_ctx),
  1259. .cra_type = &crypto_aead_type,
  1260. .cra_module = THIS_MODULE,
  1261. .cra_aead = {
  1262. .setkey = spacc_aead_setkey,
  1263. .setauthsize = spacc_aead_setauthsize,
  1264. .encrypt = spacc_aead_encrypt,
  1265. .decrypt = spacc_aead_decrypt,
  1266. .givencrypt = spacc_aead_givencrypt,
  1267. .ivsize = AES_BLOCK_SIZE,
  1268. .maxauthsize = SHA1_DIGEST_SIZE,
  1269. },
  1270. .cra_init = spacc_aead_cra_init,
  1271. .cra_exit = spacc_aead_cra_exit,
  1272. },
  1273. },
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
			.cra_type = &crypto_aead_type,
			.cra_module = THIS_MODULE,
			.cra_aead = {
				.setkey = spacc_aead_setkey,
				.setauthsize = spacc_aead_setauthsize,
				.encrypt = spacc_aead_encrypt,
				.decrypt = spacc_aead_decrypt,
				.givencrypt = spacc_aead_givencrypt,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			},
			.cra_init = spacc_aead_cra_init,
			.cra_exit = spacc_aead_cra_exit,
		},
	},
};
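
/*
 * Algorithms offered by the layer-2 SPAcc engine. This engine only
 * exposes KASUMI keystream generation in f8 mode.
 */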
static struct spacc_alg l2_engine_algs[] = {
	{
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.cra_name = "f8(kasumi)",
			.cra_driver_name = "f8-kasumi-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,
			.cra_blocksize = 8,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_kasumi_f8_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = 16,
				.max_keysize = 16,
				.ivsize = 8,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
};
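
/*
 * Common probe path for both engine variants. The caller supplies the
 * engine geometry (maximum number of contexts, cipher/hash context page
 * sizes and FIFO depth) along with the table of algorithms to register
 * with the crypto API.
 */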
static int __devinit spacc_probe(struct platform_device *pdev,
				 unsigned max_ctxs, size_t cipher_pg_sz,
				 size_t hash_pg_sz, size_t fifo_sz,
				 struct spacc_alg *algs, size_t num_algs)
{
	int i, err, ret = -EINVAL;
	struct resource *mem, *irq;
	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
						   GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->max_ctxs = max_ctxs;
	engine->cipher_pg_sz = cipher_pg_sz;
	engine->hash_pg_sz = hash_pg_sz;
	engine->fifo_sz = fifo_sz;
	engine->algs = algs;
	engine->num_algs = num_algs;
	engine->name = dev_name(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!mem || !irq) {
		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
		return -ENXIO;
	}

	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				     engine->name))
		return -ENOMEM;

	engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!engine->regs) {
		dev_err(&pdev->dev, "memory map failed\n");
		return -ENOMEM;
	}

	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
			     engine->name, engine)) {
		/* engine->dev is not set up yet, so report via the platform device. */
		dev_err(&pdev->dev, "failed to request IRQ\n");
		return -EBUSY;
	}

	engine->dev = &pdev->dev;
	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
	engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
					    MAX_DDT_LEN * sizeof(struct spacc_ddt),
					    8, SZ_64K);
	if (!engine->req_pool)
		return -ENOMEM;

	spin_lock_init(&engine->hw_lock);

	engine->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(engine->clk)) {
		dev_info(&pdev->dev, "clk unavailable\n");
		return PTR_ERR(engine->clk);
	}

	if (clk_enable(engine->clk)) {
		dev_info(&pdev->dev, "unable to enable clk\n");
		clk_put(engine->clk);
		return -EIO;
	}

	err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
	if (err) {
		clk_disable(engine->clk);
		clk_put(engine->clk);
		return err;
	}

	/*
	 * Use an IRQ threshold of 50% as a default. This seems to be a
	 * reasonable trade off of latency against throughput but can be
	 * changed at runtime.
	 */
	engine->stat_irq_thresh = (engine->fifo_sz / 2);

	/*
	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
	 * only submit a new packet for processing when we complete another in
	 * the queue. This minimizes time spent in the interrupt handler.
	 */
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
	       engine->regs + SPA_IRQ_EN_REG_OFFSET);

	setup_timer(&engine->packet_timeout, spacc_packet_timeout,
		    (unsigned long)engine);

	INIT_LIST_HEAD(&engine->pending);
	INIT_LIST_HEAD(&engine->completed);
	INIT_LIST_HEAD(&engine->in_progress);
	engine->in_flight = 0;
	tasklet_init(&engine->complete, spacc_spacc_complete,
		     (unsigned long)engine);

	platform_set_drvdata(pdev, engine);

	INIT_LIST_HEAD(&engine->registered_algs);
	for (i = 0; i < engine->num_algs; ++i) {
		engine->algs[i].engine = engine;
		err = crypto_register_alg(&engine->algs[i].alg);
		if (!err) {
			list_add_tail(&engine->algs[i].entry,
				      &engine->registered_algs);
			ret = 0;
		}
		if (err)
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
		else
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
	}

	return ret;
}
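
/*
 * Tear the engine down: stop the packet timeout timer, remove the sysfs
 * attribute, unregister any algorithms that were successfully registered
 * and finally release the clock.
 */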
static int __devexit spacc_remove(struct platform_device *pdev)
{
	struct spacc_alg *alg, *next;
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
		list_del(&alg->entry);
		crypto_unregister_alg(&alg->alg);
	}

	clk_disable(engine->clk);
	clk_put(engine->clk);

	return 0;
}
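
/*
 * Each engine variant has a thin probe wrapper that passes its fixed
 * geometry constants and algorithm table to the common spacc_probe().
 */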
static int __devinit ipsec_probe(struct platform_device *pdev)
{
	return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
			   SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
			   SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
			   SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
			   ARRAY_SIZE(ipsec_engine_algs));
}

static struct platform_driver ipsec_driver = {
	.probe = ipsec_probe,
	.remove = __devexit_p(spacc_remove),
	.driver = {
		.name = "picoxcell-ipsec",
#ifdef CONFIG_PM
		.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
	},
};

static int __devinit l2_probe(struct platform_device *pdev)
{
	return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
			   SPACC_CRYPTO_L2_CIPHER_PG_SZ,
			   SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
			   l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
}

static struct platform_driver l2_driver = {
	.probe = l2_probe,
	.remove = __devexit_p(spacc_remove),
	.driver = {
		.name = "picoxcell-l2",
#ifdef CONFIG_PM
		.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
	},
};
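
/*
 * Register both platform drivers at module load; if the L2 driver fails
 * to register, the already-registered IPsec driver is unregistered again
 * so the module loads all-or-nothing.
 */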
static int __init spacc_init(void)
{
	int ret = platform_driver_register(&ipsec_driver);
	if (ret) {
		pr_err("failed to register ipsec spacc driver\n");
		goto out;
	}

	ret = platform_driver_register(&l2_driver);
	if (ret) {
		pr_err("failed to register l2 spacc driver\n");
		goto l2_failed;
	}

	return 0;

l2_failed:
	platform_driver_unregister(&ipsec_driver);
out:
	return ret;
}
module_init(spacc_init);

static void __exit spacc_exit(void)
{
	platform_driver_unregister(&ipsec_driver);
	platform_driver_unregister(&l2_driver);
}
module_exit(spacc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");