n2_core.c
  1. /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
  2. *
  3. * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
  4. */
  5. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  6. #include <linux/kernel.h>
  7. #include <linux/module.h>
  8. #include <linux/of.h>
  9. #include <linux/of_device.h>
  10. #include <linux/cpumask.h>
  11. #include <linux/slab.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/crypto.h>
  14. #include <crypto/md5.h>
  15. #include <crypto/sha.h>
  16. #include <crypto/aes.h>
  17. #include <crypto/des.h>
  18. #include <linux/mutex.h>
  19. #include <linux/delay.h>
  20. #include <linux/sched.h>
  21. #include <crypto/internal/hash.h>
  22. #include <crypto/scatterwalk.h>
  23. #include <crypto/algapi.h>
  24. #include <asm/hypervisor.h>
  25. #include <asm/mdesc.h>
  26. #include "n2_core.h"
  27. #define DRV_MODULE_NAME "n2_crypto"
  28. #define DRV_MODULE_VERSION "0.1"
  29. #define DRV_MODULE_RELDATE "April 29, 2010"
  30. static char version[] __devinitdata =
  31. DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  32. MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  33. MODULE_DESCRIPTION("Niagara2 Crypto driver");
  34. MODULE_LICENSE("GPL");
  35. MODULE_VERSION(DRV_MODULE_VERSION);
  36. #define N2_CRA_PRIORITY 300
  37. static DEFINE_MUTEX(spu_lock);
  38. struct spu_queue {
  39. cpumask_t sharing;
  40. unsigned long qhandle;
  41. spinlock_t lock;
  42. u8 q_type;
  43. void *q;
  44. unsigned long head;
  45. unsigned long tail;
  46. struct list_head jobs;
  47. unsigned long devino;
  48. char irq_name[32];
  49. unsigned int irq;
  50. struct list_head list;
  51. };
  52. static struct spu_queue **cpu_to_cwq;
  53. static struct spu_queue **cpu_to_mau;
  54. static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
  55. {
  56. if (q->q_type == HV_NCS_QTYPE_MAU) {
  57. off += MAU_ENTRY_SIZE;
  58. if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
  59. off = 0;
  60. } else {
  61. off += CWQ_ENTRY_SIZE;
  62. if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
  63. off = 0;
  64. }
  65. return off;
  66. }
  67. struct n2_request_common {
  68. struct list_head entry;
  69. unsigned int offset;
  70. };
  71. #define OFFSET_NOT_RUNNING (~(unsigned int)0)
  72. /* An async job request records the final tail value it used in
  73. * n2_request_common->offset; test whether that offset falls in the
  74. * range (old_head, new_head], i.e. after old_head, up to and including new_head.
  75. */
  76. static inline bool job_finished(struct spu_queue *q, unsigned int offset,
  77. unsigned long old_head, unsigned long new_head)
  78. {
  79. if (old_head <= new_head) {
  80. if (offset > old_head && offset <= new_head)
  81. return true;
  82. } else {
  83. if (offset > old_head || offset <= new_head)
  84. return true;
  85. }
  86. return false;
  87. }
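/* Illustrative example of the wrap-around case above (offsets made up
 * for the example): if old_head = 0x40 and new_head = 0x10, i.e. the
 * head has wrapped past the end of the queue, then an entry whose
 * recorded offset is 0x48 or 0x08 has completed, while one recorded
 * at 0x20 is still pending.
 */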
  88. /* When the HEAD marker is unequal to the actual HEAD, we get
  89. * a virtual device INO interrupt. We should process the
  90. * completed CWQ entries and adjust the HEAD marker to clear
  91. * the IRQ.
  92. */
  93. static irqreturn_t cwq_intr(int irq, void *dev_id)
  94. {
  95. unsigned long off, new_head, hv_ret;
  96. struct spu_queue *q = dev_id;
  97. pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
  98. smp_processor_id(), q->qhandle);
  99. spin_lock(&q->lock);
  100. hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
  101. pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
  102. smp_processor_id(), new_head, hv_ret);
  103. for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
  104. /* XXX ... XXX */
  105. }
  106. hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
  107. if (hv_ret == HV_EOK)
  108. q->head = new_head;
  109. spin_unlock(&q->lock);
  110. return IRQ_HANDLED;
  111. }
  112. static irqreturn_t mau_intr(int irq, void *dev_id)
  113. {
  114. struct spu_queue *q = dev_id;
  115. unsigned long head, hv_ret;
  116. spin_lock(&q->lock);
  117. pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
  118. smp_processor_id(), q->qhandle);
  119. hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
  120. pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
  121. smp_processor_id(), head, hv_ret);
  122. sun4v_ncs_sethead_marker(q->qhandle, head);
  123. spin_unlock(&q->lock);
  124. return IRQ_HANDLED;
  125. }
  126. static void *spu_queue_next(struct spu_queue *q, void *cur)
  127. {
  128. return q->q + spu_next_offset(q, cur - q->q);
  129. }
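/* Free-space accounting for the ring: the tail-to-head distance
 * (wrapping at the end of the queue) is converted into a number of
 * entries, minus one so the tail never advances all the way to the
 * head; otherwise a full ring would be indistinguishable from an
 * empty one (head == tail).
 */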
  130. static int spu_queue_num_free(struct spu_queue *q)
  131. {
  132. unsigned long head = q->head;
  133. unsigned long tail = q->tail;
  134. unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
  135. unsigned long diff;
  136. if (head > tail)
  137. diff = head - tail;
  138. else
  139. diff = (end - tail) + head;
  140. return (diff / CWQ_ENTRY_SIZE) - 1;
  141. }
  142. static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
  143. {
  144. int avail = spu_queue_num_free(q);
  145. if (avail >= num_entries)
  146. return q->q + q->tail;
  147. return NULL;
  148. }
  149. static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
  150. {
  151. unsigned long hv_ret, new_tail;
  152. new_tail = spu_next_offset(q, last - q->q);
  153. hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
  154. if (hv_ret == HV_EOK)
  155. q->tail = new_tail;
  156. return hv_ret;
  157. }
  158. static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
  159. int enc_type, int auth_type,
  160. unsigned int hash_len,
  161. bool sfas, bool sob, bool eob, bool encrypt,
  162. int opcode)
  163. {
  164. u64 word = (len - 1) & CONTROL_LEN;
  165. word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
  166. word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
  167. word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
  168. if (sfas)
  169. word |= CONTROL_STORE_FINAL_AUTH_STATE;
  170. if (sob)
  171. word |= CONTROL_START_OF_BLOCK;
  172. if (eob)
  173. word |= CONTROL_END_OF_BLOCK;
  174. if (encrypt)
  175. word |= CONTROL_ENCRYPT;
  176. if (hmac_key_len)
  177. word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
  178. if (hash_len)
  179. word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
  180. return word;
  181. }
  182. #if 0
  183. static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
  184. {
  185. if (this_len >= 64 ||
  186. qp->head != qp->tail)
  187. return true;
  188. return false;
  189. }
  190. #endif
  191. struct n2_ahash_alg {
  192. struct list_head entry;
  193. const char *hash_zero;
  194. const u32 *hash_init;
  195. u8 hw_op_hashsz;
  196. u8 digest_size;
  197. u8 auth_type;
  198. u8 hmac_type;
  199. struct ahash_alg alg;
  200. };
  201. static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
  202. {
  203. struct crypto_alg *alg = tfm->__crt_alg;
  204. struct ahash_alg *ahash_alg;
  205. ahash_alg = container_of(alg, struct ahash_alg, halg.base);
  206. return container_of(ahash_alg, struct n2_ahash_alg, alg);
  207. }
  208. struct n2_hmac_alg {
  209. const char *child_alg;
  210. struct n2_ahash_alg derived;
  211. };
  212. static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
  213. {
  214. struct crypto_alg *alg = tfm->__crt_alg;
  215. struct ahash_alg *ahash_alg;
  216. ahash_alg = container_of(alg, struct ahash_alg, halg.base);
  217. return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
  218. }
  219. struct n2_hash_ctx {
  220. struct crypto_ahash *fallback_tfm;
  221. };
  222. #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
  223. struct n2_hmac_ctx {
  224. struct n2_hash_ctx base;
  225. struct crypto_shash *child_shash;
  226. int hash_key_len;
  227. unsigned char hash_key[N2_HASH_KEY_MAX];
  228. };
  229. struct n2_hash_req_ctx {
  230. union {
  231. struct md5_state md5;
  232. struct sha1_state sha1;
  233. struct sha256_state sha256;
  234. } u;
  235. struct ahash_request fallback_req;
  236. };
  237. static int n2_hash_async_init(struct ahash_request *req)
  238. {
  239. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  240. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  241. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  242. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  243. rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  244. return crypto_ahash_init(&rctx->fallback_req);
  245. }
  246. static int n2_hash_async_update(struct ahash_request *req)
  247. {
  248. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  249. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  250. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  251. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  252. rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  253. rctx->fallback_req.nbytes = req->nbytes;
  254. rctx->fallback_req.src = req->src;
  255. return crypto_ahash_update(&rctx->fallback_req);
  256. }
  257. static int n2_hash_async_final(struct ahash_request *req)
  258. {
  259. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  260. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  261. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  262. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  263. rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  264. rctx->fallback_req.result = req->result;
  265. return crypto_ahash_final(&rctx->fallback_req);
  266. }
  267. static int n2_hash_async_finup(struct ahash_request *req)
  268. {
  269. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  270. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  271. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  272. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  273. rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  274. rctx->fallback_req.nbytes = req->nbytes;
  275. rctx->fallback_req.src = req->src;
  276. rctx->fallback_req.result = req->result;
  277. return crypto_ahash_finup(&rctx->fallback_req);
  278. }
  279. static int n2_hash_cra_init(struct crypto_tfm *tfm)
  280. {
  281. const char *fallback_driver_name = tfm->__crt_alg->cra_name;
  282. struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
  283. struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  284. struct crypto_ahash *fallback_tfm;
  285. int err;
  286. fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
  287. CRYPTO_ALG_NEED_FALLBACK);
  288. if (IS_ERR(fallback_tfm)) {
  289. pr_warning("Fallback driver '%s' could not be loaded!\n",
  290. fallback_driver_name);
  291. err = PTR_ERR(fallback_tfm);
  292. goto out;
  293. }
  294. crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
  295. crypto_ahash_reqsize(fallback_tfm)));
  296. ctx->fallback_tfm = fallback_tfm;
  297. return 0;
  298. out:
  299. return err;
  300. }
  301. static void n2_hash_cra_exit(struct crypto_tfm *tfm)
  302. {
  303. struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
  304. struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
  305. crypto_free_ahash(ctx->fallback_tfm);
  306. }
  307. static int n2_hmac_cra_init(struct crypto_tfm *tfm)
  308. {
  309. const char *fallback_driver_name = tfm->__crt_alg->cra_name;
  310. struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
  311. struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
  312. struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
  313. struct crypto_ahash *fallback_tfm;
  314. struct crypto_shash *child_shash;
  315. int err;
  316. fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
  317. CRYPTO_ALG_NEED_FALLBACK);
  318. if (IS_ERR(fallback_tfm)) {
  319. pr_warning("Fallback driver '%s' could not be loaded!\n",
  320. fallback_driver_name);
  321. err = PTR_ERR(fallback_tfm);
  322. goto out;
  323. }
  324. child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
  325. if (IS_ERR(child_shash)) {
  326. pr_warning("Child shash '%s' could not be loaded!\n",
  327. n2alg->child_alg);
  328. err = PTR_ERR(child_shash);
  329. goto out_free_fallback;
  330. }
  331. crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
  332. crypto_ahash_reqsize(fallback_tfm)));
  333. ctx->child_shash = child_shash;
  334. ctx->base.fallback_tfm = fallback_tfm;
  335. return 0;
  336. out_free_fallback:
  337. crypto_free_ahash(fallback_tfm);
  338. out:
  339. return err;
  340. }
  341. static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
  342. {
  343. struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
  344. struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
  345. crypto_free_ahash(ctx->base.fallback_tfm);
  346. crypto_free_shash(ctx->child_shash);
  347. }
  348. static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
  349. unsigned int keylen)
  350. {
  351. struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
  352. struct crypto_shash *child_shash = ctx->child_shash;
  353. struct crypto_ahash *fallback_tfm;
  354. struct {
  355. struct shash_desc shash;
  356. char ctx[crypto_shash_descsize(child_shash)];
  357. } desc;
  358. int err, bs, ds;
  359. fallback_tfm = ctx->base.fallback_tfm;
  360. err = crypto_ahash_setkey(fallback_tfm, key, keylen);
  361. if (err)
  362. return err;
  363. desc.shash.tfm = child_shash;
  364. desc.shash.flags = crypto_ahash_get_flags(tfm) &
  365. CRYPTO_TFM_REQ_MAY_SLEEP;
  366. bs = crypto_shash_blocksize(child_shash);
  367. ds = crypto_shash_digestsize(child_shash);
  368. BUG_ON(ds > N2_HASH_KEY_MAX);
  369. if (keylen > bs) {
  370. err = crypto_shash_digest(&desc.shash, key, keylen,
  371. ctx->hash_key);
  372. if (err)
  373. return err;
  374. keylen = ds;
  375. } else if (keylen <= N2_HASH_KEY_MAX)
  376. memcpy(ctx->hash_key, key, keylen);
  377. ctx->hash_key_len = keylen;
  378. return err;
  379. }
  380. static unsigned long wait_for_tail(struct spu_queue *qp)
  381. {
  382. unsigned long head, hv_ret;
  383. do {
  384. hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
  385. if (hv_ret != HV_EOK) {
  386. pr_err("Hypervisor error on gethead\n");
  387. break;
  388. }
  389. if (head == qp->tail) {
  390. qp->head = head;
  391. break;
  392. }
  393. } while (1);
  394. return hv_ret;
  395. }
  396. static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
  397. struct cwq_initial_entry *ent)
  398. {
  399. unsigned long hv_ret = spu_queue_submit(qp, ent);
  400. if (hv_ret == HV_EOK)
  401. hv_ret = wait_for_tail(qp);
  402. return hv_ret;
  403. }
  404. static int n2_do_async_digest(struct ahash_request *req,
  405. unsigned int auth_type, unsigned int digest_size,
  406. unsigned int result_size, void *hash_loc,
  407. unsigned long auth_key, unsigned int auth_key_len)
  408. {
  409. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  410. struct cwq_initial_entry *ent;
  411. struct crypto_hash_walk walk;
  412. struct spu_queue *qp;
  413. unsigned long flags;
  414. int err = -ENODEV;
  415. int nbytes, cpu;
  416. /* The total effective length of the operation may not
  417. * exceed 2^16.
  418. */
  419. if (unlikely(req->nbytes > (1 << 16))) {
  420. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  421. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  422. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  423. rctx->fallback_req.base.flags =
  424. req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  425. rctx->fallback_req.nbytes = req->nbytes;
  426. rctx->fallback_req.src = req->src;
  427. rctx->fallback_req.result = req->result;
  428. return crypto_ahash_digest(&rctx->fallback_req);
  429. }
  430. nbytes = crypto_hash_walk_first(req, &walk);
  431. cpu = get_cpu();
  432. qp = cpu_to_cwq[cpu];
  433. if (!qp)
  434. goto out;
  435. spin_lock_irqsave(&qp->lock, flags);
  436. /* XXX can do better, improve this later by doing a by-hand scatterlist
  437. * XXX walk, etc.
  438. */
  439. ent = qp->q + qp->tail;
  440. ent->control = control_word_base(nbytes, auth_key_len, 0,
  441. auth_type, digest_size,
  442. false, true, false, false,
  443. OPCODE_INPLACE_BIT |
  444. OPCODE_AUTH_MAC);
  445. ent->src_addr = __pa(walk.data);
  446. ent->auth_key_addr = auth_key;
  447. ent->auth_iv_addr = __pa(hash_loc);
  448. ent->final_auth_state_addr = 0UL;
  449. ent->enc_key_addr = 0UL;
  450. ent->enc_iv_addr = 0UL;
  451. ent->dest_addr = __pa(hash_loc);
  452. nbytes = crypto_hash_walk_done(&walk, 0);
  453. while (nbytes > 0) {
  454. ent = spu_queue_next(qp, ent);
  455. ent->control = (nbytes - 1);
  456. ent->src_addr = __pa(walk.data);
  457. ent->auth_key_addr = 0UL;
  458. ent->auth_iv_addr = 0UL;
  459. ent->final_auth_state_addr = 0UL;
  460. ent->enc_key_addr = 0UL;
  461. ent->enc_iv_addr = 0UL;
  462. ent->dest_addr = 0UL;
  463. nbytes = crypto_hash_walk_done(&walk, 0);
  464. }
  465. ent->control |= CONTROL_END_OF_BLOCK;
  466. if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
  467. err = -EINVAL;
  468. else
  469. err = 0;
  470. spin_unlock_irqrestore(&qp->lock, flags);
  471. if (!err)
  472. memcpy(req->result, hash_loc, result_size);
  473. out:
  474. put_cpu();
  475. return err;
  476. }
  477. static int n2_hash_async_digest(struct ahash_request *req)
  478. {
  479. struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
  480. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  481. int ds;
  482. ds = n2alg->digest_size;
  483. if (unlikely(req->nbytes == 0)) {
  484. memcpy(req->result, n2alg->hash_zero, ds);
  485. return 0;
  486. }
  487. memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
  488. return n2_do_async_digest(req, n2alg->auth_type,
  489. n2alg->hw_op_hashsz, ds,
  490. &rctx->u, 0UL, 0);
  491. }
  492. static int n2_hmac_async_digest(struct ahash_request *req)
  493. {
  494. struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
  495. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  496. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  497. struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
  498. int ds;
  499. ds = n2alg->derived.digest_size;
  500. if (unlikely(req->nbytes == 0) ||
  501. unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
  502. struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
  503. struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
  504. ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
  505. rctx->fallback_req.base.flags =
  506. req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  507. rctx->fallback_req.nbytes = req->nbytes;
  508. rctx->fallback_req.src = req->src;
  509. rctx->fallback_req.result = req->result;
  510. return crypto_ahash_digest(&rctx->fallback_req);
  511. }
  512. memcpy(&rctx->u, n2alg->derived.hash_init,
  513. n2alg->derived.hw_op_hashsz);
  514. return n2_do_async_digest(req, n2alg->derived.hmac_type,
  515. n2alg->derived.hw_op_hashsz, ds,
  516. &rctx->u,
  517. __pa(&ctx->hash_key),
  518. ctx->hash_key_len);
  519. }
  520. struct n2_cipher_context {
  521. int key_len;
  522. int enc_type;
  523. union {
  524. u8 aes[AES_MAX_KEY_SIZE];
  525. u8 des[DES_KEY_SIZE];
  526. u8 des3[3 * DES_KEY_SIZE];
  527. u8 arc4[258]; /* S-box, X, Y */
  528. } key;
  529. };
  530. #define N2_CHUNK_ARR_LEN 16
  531. struct n2_crypto_chunk {
  532. struct list_head entry;
  533. unsigned long iv_paddr : 44;
  534. unsigned long arr_len : 20;
  535. unsigned long dest_paddr;
  536. unsigned long dest_final;
  537. struct {
  538. unsigned long src_paddr : 44;
  539. unsigned long src_len : 20;
  540. } arr[N2_CHUNK_ARR_LEN];
  541. };
  542. struct n2_request_context {
  543. struct ablkcipher_walk walk;
  544. struct list_head chunk_list;
  545. struct n2_crypto_chunk chunk;
  546. u8 temp_iv[16];
  547. };
  548. /* The SPU allows some level of flexibility for partial cipher blocks
  549. * being specified in a descriptor.
  550. *
  551. * It merely requires that every descriptor's length field is at least
  552. * as large as the cipher block size. This means that a cipher block
  553. * can span at most 2 descriptors. However, this does not allow a
  554. * partial block to span into the final descriptor as that would
  555. * violate the rule (since every descriptor's length must be at least
  556. * the block size). So, for example, assuming an 8 byte block size:
  557. *
  558. * 0xe --> 0xa --> 0x8
  559. *
  560. * is a valid length sequence, whereas:
  561. *
  562. * 0xe --> 0xb --> 0x7
  563. *
  564. * is not a valid sequence.
  565. */
  566. struct n2_cipher_alg {
  567. struct list_head entry;
  568. u8 enc_type;
  569. struct crypto_alg alg;
  570. };
  571. static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
  572. {
  573. struct crypto_alg *alg = tfm->__crt_alg;
  574. return container_of(alg, struct n2_cipher_alg, alg);
  575. }
  576. struct n2_cipher_request_context {
  577. struct ablkcipher_walk walk;
  578. };
  579. static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  580. unsigned int keylen)
  581. {
  582. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  583. struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
  584. struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
  585. ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
  586. switch (keylen) {
  587. case AES_KEYSIZE_128:
  588. ctx->enc_type |= ENC_TYPE_ALG_AES128;
  589. break;
  590. case AES_KEYSIZE_192:
  591. ctx->enc_type |= ENC_TYPE_ALG_AES192;
  592. break;
  593. case AES_KEYSIZE_256:
  594. ctx->enc_type |= ENC_TYPE_ALG_AES256;
  595. break;
  596. default:
  597. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  598. return -EINVAL;
  599. }
  600. ctx->key_len = keylen;
  601. memcpy(ctx->key.aes, key, keylen);
  602. return 0;
  603. }
  604. static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  605. unsigned int keylen)
  606. {
  607. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  608. struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
  609. struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
  610. u32 tmp[DES_EXPKEY_WORDS];
  611. int err;
  612. ctx->enc_type = n2alg->enc_type;
  613. if (keylen != DES_KEY_SIZE) {
  614. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  615. return -EINVAL;
  616. }
  617. err = des_ekey(tmp, key);
  618. if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
  619. tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
  620. return -EINVAL;
  621. }
  622. ctx->key_len = keylen;
  623. memcpy(ctx->key.des, key, keylen);
  624. return 0;
  625. }
  626. static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  627. unsigned int keylen)
  628. {
  629. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  630. struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
  631. struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
  632. ctx->enc_type = n2alg->enc_type;
  633. if (keylen != (3 * DES_KEY_SIZE)) {
  634. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  635. return -EINVAL;
  636. }
  637. ctx->key_len = keylen;
  638. memcpy(ctx->key.des3, key, keylen);
  639. return 0;
  640. }
  641. static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  642. unsigned int keylen)
  643. {
  644. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  645. struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
  646. struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
  647. u8 *s = ctx->key.arc4;
  648. u8 *x = s + 256;
  649. u8 *y = x + 1;
  650. int i, j, k;
  651. ctx->enc_type = n2alg->enc_type;
  652. j = k = 0;
  653. *x = 0;
  654. *y = 0;
  655. for (i = 0; i < 256; i++)
  656. s[i] = i;
  657. for (i = 0; i < 256; i++) {
  658. u8 a = s[i];
  659. j = (j + key[k] + a) & 0xff;
  660. s[i] = s[j];
  661. s[j] = a;
  662. if (++k >= keylen)
  663. k = 0;
  664. }
  665. return 0;
  666. }
  667. static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
  668. {
  669. int this_len = nbytes;
  670. this_len -= (nbytes & (block_size - 1));
  671. return this_len > (1 << 16) ? (1 << 16) : this_len;
  672. }
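/* Example: with an 8-byte block size and nbytes = 0x1e, the length is
 * rounded down to 0x18 (a whole number of blocks); lengths that would
 * exceed the 2^16 descriptor limit are capped at 0x10000.
 */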
  673. static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
  674. struct spu_queue *qp, bool encrypt)
  675. {
  676. struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
  677. struct cwq_initial_entry *ent;
  678. bool in_place;
  679. int i;
  680. ent = spu_queue_alloc(qp, cp->arr_len);
  681. if (!ent) {
  682. pr_info("queue_alloc() of %d fails\n",
  683. cp->arr_len);
  684. return -EBUSY;
  685. }
  686. in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
  687. ent->control = control_word_base(cp->arr[0].src_len,
  688. 0, ctx->enc_type, 0, 0,
  689. false, true, false, encrypt,
  690. OPCODE_ENCRYPT |
  691. (in_place ? OPCODE_INPLACE_BIT : 0));
  692. ent->src_addr = cp->arr[0].src_paddr;
  693. ent->auth_key_addr = 0UL;
  694. ent->auth_iv_addr = 0UL;
  695. ent->final_auth_state_addr = 0UL;
  696. ent->enc_key_addr = __pa(&ctx->key);
  697. ent->enc_iv_addr = cp->iv_paddr;
  698. ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
  699. for (i = 1; i < cp->arr_len; i++) {
  700. ent = spu_queue_next(qp, ent);
  701. ent->control = cp->arr[i].src_len - 1;
  702. ent->src_addr = cp->arr[i].src_paddr;
  703. ent->auth_key_addr = 0UL;
  704. ent->auth_iv_addr = 0UL;
  705. ent->final_auth_state_addr = 0UL;
  706. ent->enc_key_addr = 0UL;
  707. ent->enc_iv_addr = 0UL;
  708. ent->dest_addr = 0UL;
  709. }
  710. ent->control |= CONTROL_END_OF_BLOCK;
  711. return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
  712. }
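/* Build the list of n2_crypto_chunk descriptors for a request by
 * walking the source/destination scatterlists.  A new chunk is started
 * whenever the in-place property changes, the output stops being
 * physically contiguous, the per-chunk array fills up
 * (N2_CHUNK_ARR_LEN entries), or the accumulated length would exceed
 * 2^16 bytes.
 */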
  713. static int n2_compute_chunks(struct ablkcipher_request *req)
  714. {
  715. struct n2_request_context *rctx = ablkcipher_request_ctx(req);
  716. struct ablkcipher_walk *walk = &rctx->walk;
  717. struct n2_crypto_chunk *chunk;
  718. unsigned long dest_prev;
  719. unsigned int tot_len;
  720. bool prev_in_place;
  721. int err, nbytes;
  722. ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
  723. err = ablkcipher_walk_phys(req, walk);
  724. if (err)
  725. return err;
  726. INIT_LIST_HEAD(&rctx->chunk_list);
  727. chunk = &rctx->chunk;
  728. INIT_LIST_HEAD(&chunk->entry);
  729. chunk->iv_paddr = 0UL;
  730. chunk->arr_len = 0;
  731. chunk->dest_paddr = 0UL;
  732. prev_in_place = false;
  733. dest_prev = ~0UL;
  734. tot_len = 0;
  735. while ((nbytes = walk->nbytes) != 0) {
  736. unsigned long dest_paddr, src_paddr;
  737. bool in_place;
  738. int this_len;
  739. src_paddr = (page_to_phys(walk->src.page) +
  740. walk->src.offset);
  741. dest_paddr = (page_to_phys(walk->dst.page) +
  742. walk->dst.offset);
  743. in_place = (src_paddr == dest_paddr);
  744. this_len = cipher_descriptor_len(nbytes, walk->blocksize);
  745. if (chunk->arr_len != 0) {
  746. if (in_place != prev_in_place ||
  747. (!prev_in_place &&
  748. dest_paddr != dest_prev) ||
  749. chunk->arr_len == N2_CHUNK_ARR_LEN ||
  750. tot_len + this_len > (1 << 16)) {
  751. chunk->dest_final = dest_prev;
  752. list_add_tail(&chunk->entry,
  753. &rctx->chunk_list);
  754. chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
  755. if (!chunk) {
  756. err = -ENOMEM;
  757. break;
  758. }
  759. INIT_LIST_HEAD(&chunk->entry);
  760. }
  761. }
  762. if (chunk->arr_len == 0) {
  763. chunk->dest_paddr = dest_paddr;
  764. tot_len = 0;
  765. }
  766. chunk->arr[chunk->arr_len].src_paddr = src_paddr;
  767. chunk->arr[chunk->arr_len].src_len = this_len;
  768. chunk->arr_len++;
  769. dest_prev = dest_paddr + this_len;
  770. prev_in_place = in_place;
  771. tot_len += this_len;
  772. err = ablkcipher_walk_done(req, walk, nbytes - this_len);
  773. if (err)
  774. break;
  775. }
  776. if (!err && chunk->arr_len != 0) {
  777. chunk->dest_final = dest_prev;
  778. list_add_tail(&chunk->entry, &rctx->chunk_list);
  779. }
  780. return err;
  781. }
  782. static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
  783. {
  784. struct n2_request_context *rctx = ablkcipher_request_ctx(req);
  785. struct n2_crypto_chunk *c, *tmp;
  786. if (final_iv)
  787. memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
  788. ablkcipher_walk_complete(&rctx->walk);
  789. list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
  790. list_del(&c->entry);
  791. if (unlikely(c != &rctx->chunk))
  792. kfree(c);
  793. }
  794. }
  795. static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
  796. {
  797. struct n2_request_context *rctx = ablkcipher_request_ctx(req);
  798. struct crypto_tfm *tfm = req->base.tfm;
  799. int err = n2_compute_chunks(req);
  800. struct n2_crypto_chunk *c, *tmp;
  801. unsigned long flags, hv_ret;
  802. struct spu_queue *qp;
  803. if (err)
  804. return err;
  805. qp = cpu_to_cwq[get_cpu()];
  806. err = -ENODEV;
  807. if (!qp)
  808. goto out;
  809. spin_lock_irqsave(&qp->lock, flags);
  810. list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
  811. err = __n2_crypt_chunk(tfm, c, qp, encrypt);
  812. if (err)
  813. break;
  814. list_del(&c->entry);
  815. if (unlikely(c != &rctx->chunk))
  816. kfree(c);
  817. }
  818. if (!err) {
  819. hv_ret = wait_for_tail(qp);
  820. if (hv_ret != HV_EOK)
  821. err = -EINVAL;
  822. }
  823. spin_unlock_irqrestore(&qp->lock, flags);
  824. put_cpu();
  825. out:
  826. n2_chunk_complete(req, NULL);
  827. return err;
  828. }
  829. static int n2_encrypt_ecb(struct ablkcipher_request *req)
  830. {
  831. return n2_do_ecb(req, true);
  832. }
  833. static int n2_decrypt_ecb(struct ablkcipher_request *req)
  834. {
  835. return n2_do_ecb(req, false);
  836. }
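/* Chained-mode processing.  For encryption the chunks are walked in
 * order and each chunk's IV is the last output block of the previous
 * chunk (c->dest_final - blocksize).  For decryption the chunks are
 * walked in reverse: each chunk's IV is the last ciphertext block of
 * the chunk preceding it in the original order, and the final IV is
 * saved into rctx->temp_iv up front in case an in-place operation
 * overwrites it.
 */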
  837. static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
  838. {
  839. struct n2_request_context *rctx = ablkcipher_request_ctx(req);
  840. struct crypto_tfm *tfm = req->base.tfm;
  841. unsigned long flags, hv_ret, iv_paddr;
  842. int err = n2_compute_chunks(req);
  843. struct n2_crypto_chunk *c, *tmp;
  844. struct spu_queue *qp;
  845. void *final_iv_addr;
  846. final_iv_addr = NULL;
  847. if (err)
  848. return err;
  849. qp = cpu_to_cwq[get_cpu()];
  850. err = -ENODEV;
  851. if (!qp)
  852. goto out;
  853. spin_lock_irqsave(&qp->lock, flags);
  854. if (encrypt) {
  855. iv_paddr = __pa(rctx->walk.iv);
  856. list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
  857. entry) {
  858. c->iv_paddr = iv_paddr;
  859. err = __n2_crypt_chunk(tfm, c, qp, true);
  860. if (err)
  861. break;
  862. iv_paddr = c->dest_final - rctx->walk.blocksize;
  863. list_del(&c->entry);
  864. if (unlikely(c != &rctx->chunk))
  865. kfree(c);
  866. }
  867. final_iv_addr = __va(iv_paddr);
  868. } else {
  869. list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
  870. entry) {
  871. if (c == &rctx->chunk) {
  872. iv_paddr = __pa(rctx->walk.iv);
  873. } else {
  874. iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
  875. tmp->arr[tmp->arr_len-1].src_len -
  876. rctx->walk.blocksize);
  877. }
  878. if (!final_iv_addr) {
  879. unsigned long pa;
  880. pa = (c->arr[c->arr_len-1].src_paddr +
  881. c->arr[c->arr_len-1].src_len -
  882. rctx->walk.blocksize);
  883. final_iv_addr = rctx->temp_iv;
  884. memcpy(rctx->temp_iv, __va(pa),
  885. rctx->walk.blocksize);
  886. }
  887. c->iv_paddr = iv_paddr;
  888. err = __n2_crypt_chunk(tfm, c, qp, false);
  889. if (err)
  890. break;
  891. list_del(&c->entry);
  892. if (unlikely(c != &rctx->chunk))
  893. kfree(c);
  894. }
  895. }
  896. if (!err) {
  897. hv_ret = wait_for_tail(qp);
  898. if (hv_ret != HV_EOK)
  899. err = -EINVAL;
  900. }
  901. spin_unlock_irqrestore(&qp->lock, flags);
  902. put_cpu();
  903. out:
  904. n2_chunk_complete(req, err ? NULL : final_iv_addr);
  905. return err;
  906. }
  907. static int n2_encrypt_chaining(struct ablkcipher_request *req)
  908. {
  909. return n2_do_chaining(req, true);
  910. }
  911. static int n2_decrypt_chaining(struct ablkcipher_request *req)
  912. {
  913. return n2_do_chaining(req, false);
  914. }
  915. struct n2_cipher_tmpl {
  916. const char *name;
  917. const char *drv_name;
  918. u8 block_size;
  919. u8 enc_type;
  920. struct ablkcipher_alg ablkcipher;
  921. };
  922. static const struct n2_cipher_tmpl cipher_tmpls[] = {
  923. /* ARC4: only ECB is supported (chaining bits ignored) */
  924. { .name = "ecb(arc4)",
  925. .drv_name = "ecb-arc4",
  926. .block_size = 1,
  927. .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
  928. ENC_TYPE_CHAINING_ECB),
  929. .ablkcipher = {
  930. .min_keysize = 1,
  931. .max_keysize = 256,
  932. .setkey = n2_arc4_setkey,
  933. .encrypt = n2_encrypt_ecb,
  934. .decrypt = n2_decrypt_ecb,
  935. },
  936. },
  937. /* DES: ECB, CBC and CFB are supported */
  938. { .name = "ecb(des)",
  939. .drv_name = "ecb-des",
  940. .block_size = DES_BLOCK_SIZE,
  941. .enc_type = (ENC_TYPE_ALG_DES |
  942. ENC_TYPE_CHAINING_ECB),
  943. .ablkcipher = {
  944. .min_keysize = DES_KEY_SIZE,
  945. .max_keysize = DES_KEY_SIZE,
  946. .setkey = n2_des_setkey,
  947. .encrypt = n2_encrypt_ecb,
  948. .decrypt = n2_decrypt_ecb,
  949. },
  950. },
  951. { .name = "cbc(des)",
  952. .drv_name = "cbc-des",
  953. .block_size = DES_BLOCK_SIZE,
  954. .enc_type = (ENC_TYPE_ALG_DES |
  955. ENC_TYPE_CHAINING_CBC),
  956. .ablkcipher = {
  957. .ivsize = DES_BLOCK_SIZE,
  958. .min_keysize = DES_KEY_SIZE,
  959. .max_keysize = DES_KEY_SIZE,
  960. .setkey = n2_des_setkey,
  961. .encrypt = n2_encrypt_chaining,
  962. .decrypt = n2_decrypt_chaining,
  963. },
  964. },
  965. { .name = "cfb(des)",
  966. .drv_name = "cfb-des",
  967. .block_size = DES_BLOCK_SIZE,
  968. .enc_type = (ENC_TYPE_ALG_DES |
  969. ENC_TYPE_CHAINING_CFB),
  970. .ablkcipher = {
  971. .min_keysize = DES_KEY_SIZE,
  972. .max_keysize = DES_KEY_SIZE,
  973. .setkey = n2_des_setkey,
  974. .encrypt = n2_encrypt_chaining,
  975. .decrypt = n2_decrypt_chaining,
  976. },
  977. },
  978. /* 3DES: ECB, CBC and CFB are supported */
  979. { .name = "ecb(des3_ede)",
  980. .drv_name = "ecb-3des",
  981. .block_size = DES_BLOCK_SIZE,
  982. .enc_type = (ENC_TYPE_ALG_3DES |
  983. ENC_TYPE_CHAINING_ECB),
  984. .ablkcipher = {
  985. .min_keysize = 3 * DES_KEY_SIZE,
  986. .max_keysize = 3 * DES_KEY_SIZE,
  987. .setkey = n2_3des_setkey,
  988. .encrypt = n2_encrypt_ecb,
  989. .decrypt = n2_decrypt_ecb,
  990. },
  991. },
  992. { .name = "cbc(des3_ede)",
  993. .drv_name = "cbc-3des",
  994. .block_size = DES_BLOCK_SIZE,
  995. .enc_type = (ENC_TYPE_ALG_3DES |
  996. ENC_TYPE_CHAINING_CBC),
  997. .ablkcipher = {
  998. .ivsize = DES_BLOCK_SIZE,
  999. .min_keysize = 3 * DES_KEY_SIZE,
  1000. .max_keysize = 3 * DES_KEY_SIZE,
  1001. .setkey = n2_3des_setkey,
  1002. .encrypt = n2_encrypt_chaining,
  1003. .decrypt = n2_decrypt_chaining,
  1004. },
  1005. },
  1006. { .name = "cfb(des3_ede)",
  1007. .drv_name = "cfb-3des",
  1008. .block_size = DES_BLOCK_SIZE,
  1009. .enc_type = (ENC_TYPE_ALG_3DES |
  1010. ENC_TYPE_CHAINING_CFB),
  1011. .ablkcipher = {
  1012. .min_keysize = 3 * DES_KEY_SIZE,
  1013. .max_keysize = 3 * DES_KEY_SIZE,
  1014. .setkey = n2_3des_setkey,
  1015. .encrypt = n2_encrypt_chaining,
  1016. .decrypt = n2_decrypt_chaining,
  1017. },
  1018. },
  1019. /* AES: ECB, CBC and CTR are supported */
  1020. { .name = "ecb(aes)",
  1021. .drv_name = "ecb-aes",
  1022. .block_size = AES_BLOCK_SIZE,
  1023. .enc_type = (ENC_TYPE_ALG_AES128 |
  1024. ENC_TYPE_CHAINING_ECB),
  1025. .ablkcipher = {
  1026. .min_keysize = AES_MIN_KEY_SIZE,
  1027. .max_keysize = AES_MAX_KEY_SIZE,
  1028. .setkey = n2_aes_setkey,
  1029. .encrypt = n2_encrypt_ecb,
  1030. .decrypt = n2_decrypt_ecb,
  1031. },
  1032. },
  1033. { .name = "cbc(aes)",
  1034. .drv_name = "cbc-aes",
  1035. .block_size = AES_BLOCK_SIZE,
  1036. .enc_type = (ENC_TYPE_ALG_AES128 |
  1037. ENC_TYPE_CHAINING_CBC),
  1038. .ablkcipher = {
  1039. .ivsize = AES_BLOCK_SIZE,
  1040. .min_keysize = AES_MIN_KEY_SIZE,
  1041. .max_keysize = AES_MAX_KEY_SIZE,
  1042. .setkey = n2_aes_setkey,
  1043. .encrypt = n2_encrypt_chaining,
  1044. .decrypt = n2_decrypt_chaining,
  1045. },
  1046. },
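/* CTR mode is a stream construction, so decryption is the same
 * keystream operation as encryption; both callbacks below point at
 * n2_encrypt_chaining.
 */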
  1047. { .name = "ctr(aes)",
  1048. .drv_name = "ctr-aes",
  1049. .block_size = AES_BLOCK_SIZE,
  1050. .enc_type = (ENC_TYPE_ALG_AES128 |
  1051. ENC_TYPE_CHAINING_COUNTER),
  1052. .ablkcipher = {
  1053. .ivsize = AES_BLOCK_SIZE,
  1054. .min_keysize = AES_MIN_KEY_SIZE,
  1055. .max_keysize = AES_MAX_KEY_SIZE,
  1056. .setkey = n2_aes_setkey,
  1057. .encrypt = n2_encrypt_chaining,
  1058. .decrypt = n2_encrypt_chaining,
  1059. },
  1060. },
  1061. };
  1062. #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
  1063. static LIST_HEAD(cipher_algs);
  1064. struct n2_hash_tmpl {
  1065. const char *name;
  1066. const char *hash_zero;
  1067. const u32 *hash_init;
  1068. u8 hw_op_hashsz;
  1069. u8 digest_size;
  1070. u8 block_size;
  1071. u8 auth_type;
  1072. u8 hmac_type;
  1073. };
  1074. static const char md5_zero[MD5_DIGEST_SIZE] = {
  1075. 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
  1076. 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
  1077. };
  1078. static const u32 md5_init[MD5_HASH_WORDS] = {
  1079. cpu_to_le32(0x67452301),
  1080. cpu_to_le32(0xefcdab89),
  1081. cpu_to_le32(0x98badcfe),
  1082. cpu_to_le32(0x10325476),
  1083. };
  1084. static const char sha1_zero[SHA1_DIGEST_SIZE] = {
  1085. 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
  1086. 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
  1087. 0x07, 0x09
  1088. };
  1089. static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
  1090. SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
  1091. };
  1092. static const char sha256_zero[SHA256_DIGEST_SIZE] = {
  1093. 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
  1094. 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
  1095. 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
  1096. 0x1b, 0x78, 0x52, 0xb8, 0x55
  1097. };
  1098. static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
  1099. SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
  1100. SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
  1101. };
  1102. static const char sha224_zero[SHA224_DIGEST_SIZE] = {
  1103. 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
  1104. 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
  1105. 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
  1106. 0x2f
  1107. };
  1108. static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
  1109. SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
  1110. SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
  1111. };
  1112. static const struct n2_hash_tmpl hash_tmpls[] = {
  1113. { .name = "md5",
  1114. .hash_zero = md5_zero,
  1115. .hash_init = md5_init,
  1116. .auth_type = AUTH_TYPE_MD5,
  1117. .hmac_type = AUTH_TYPE_HMAC_MD5,
  1118. .hw_op_hashsz = MD5_DIGEST_SIZE,
  1119. .digest_size = MD5_DIGEST_SIZE,
  1120. .block_size = MD5_HMAC_BLOCK_SIZE },
  1121. { .name = "sha1",
  1122. .hash_zero = sha1_zero,
  1123. .hash_init = sha1_init,
  1124. .auth_type = AUTH_TYPE_SHA1,
  1125. .hmac_type = AUTH_TYPE_HMAC_SHA1,
  1126. .hw_op_hashsz = SHA1_DIGEST_SIZE,
  1127. .digest_size = SHA1_DIGEST_SIZE,
  1128. .block_size = SHA1_BLOCK_SIZE },
  1129. { .name = "sha256",
  1130. .hash_zero = sha256_zero,
  1131. .hash_init = sha256_init,
  1132. .auth_type = AUTH_TYPE_SHA256,
  1133. .hmac_type = AUTH_TYPE_HMAC_SHA256,
  1134. .hw_op_hashsz = SHA256_DIGEST_SIZE,
  1135. .digest_size = SHA256_DIGEST_SIZE,
  1136. .block_size = SHA256_BLOCK_SIZE },
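/* SHA-224 is computed as a SHA-256 hardware operation (full 32-byte
 * internal state) and the result is truncated to 28 bytes.  Its
 * hmac_type is AUTH_TYPE_RESERVED, so no hmac(sha224) variant is
 * registered (see __n2_register_one_ahash()).
 */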
  1137. { .name = "sha224",
  1138. .hash_zero = sha224_zero,
  1139. .hash_init = sha224_init,
  1140. .auth_type = AUTH_TYPE_SHA256,
  1141. .hmac_type = AUTH_TYPE_RESERVED,
  1142. .hw_op_hashsz = SHA256_DIGEST_SIZE,
  1143. .digest_size = SHA224_DIGEST_SIZE,
  1144. .block_size = SHA224_BLOCK_SIZE },
  1145. };
  1146. #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
  1147. static LIST_HEAD(ahash_algs);
  1148. static LIST_HEAD(hmac_algs);
  1149. static int algs_registered;
  1150. static void __n2_unregister_algs(void)
  1151. {
  1152. struct n2_cipher_alg *cipher, *cipher_tmp;
  1153. struct n2_ahash_alg *alg, *alg_tmp;
  1154. struct n2_hmac_alg *hmac, *hmac_tmp;
  1155. list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
  1156. crypto_unregister_alg(&cipher->alg);
  1157. list_del(&cipher->entry);
  1158. kfree(cipher);
  1159. }
  1160. list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
  1161. crypto_unregister_ahash(&hmac->derived.alg);
  1162. list_del(&hmac->derived.entry);
  1163. kfree(hmac);
  1164. }
  1165. list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
  1166. crypto_unregister_ahash(&alg->alg);
  1167. list_del(&alg->entry);
  1168. kfree(alg);
  1169. }
  1170. }
  1171. static int n2_cipher_cra_init(struct crypto_tfm *tfm)
  1172. {
  1173. tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
  1174. return 0;
  1175. }
  1176. static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
  1177. {
  1178. struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
  1179. struct crypto_alg *alg;
  1180. int err;
  1181. if (!p)
  1182. return -ENOMEM;
  1183. alg = &p->alg;
  1184. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
  1185. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
  1186. alg->cra_priority = N2_CRA_PRIORITY;
  1187. alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
  1188. alg->cra_blocksize = tmpl->block_size;
  1189. p->enc_type = tmpl->enc_type;
  1190. alg->cra_ctxsize = sizeof(struct n2_cipher_context);
  1191. alg->cra_type = &crypto_ablkcipher_type;
  1192. alg->cra_u.ablkcipher = tmpl->ablkcipher;
  1193. alg->cra_init = n2_cipher_cra_init;
  1194. alg->cra_module = THIS_MODULE;
  1195. list_add(&p->entry, &cipher_algs);
  1196. err = crypto_register_alg(alg);
  1197. if (err) {
  1198. pr_err("%s alg registration failed\n", alg->cra_name);
  1199. list_del(&p->entry);
  1200. kfree(p);
  1201. } else {
  1202. pr_info("%s alg registered\n", alg->cra_name);
  1203. }
  1204. return err;
  1205. }
  1206. static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
  1207. {
  1208. struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
  1209. struct ahash_alg *ahash;
  1210. struct crypto_alg *base;
  1211. int err;
  1212. if (!p)
  1213. return -ENOMEM;
  1214. p->child_alg = n2ahash->alg.halg.base.cra_name;
  1215. memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
  1216. INIT_LIST_HEAD(&p->derived.entry);
  1217. ahash = &p->derived.alg;
  1218. ahash->digest = n2_hmac_async_digest;
  1219. ahash->setkey = n2_hmac_async_setkey;
  1220. base = &ahash->halg.base;
  1221. snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
  1222. snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
  1223. base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
  1224. base->cra_init = n2_hmac_cra_init;
  1225. base->cra_exit = n2_hmac_cra_exit;
  1226. list_add(&p->derived.entry, &hmac_algs);
  1227. err = crypto_register_ahash(ahash);
  1228. if (err) {
  1229. pr_err("%s alg registration failed\n", base->cra_name);
  1230. list_del(&p->derived.entry);
  1231. kfree(p);
  1232. } else {
  1233. pr_info("%s alg registered\n", base->cra_name);
  1234. }
  1235. return err;
  1236. }
  1237. static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
  1238. {
  1239. struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
  1240. struct hash_alg_common *halg;
  1241. struct crypto_alg *base;
  1242. struct ahash_alg *ahash;
  1243. int err;
  1244. if (!p)
  1245. return -ENOMEM;
  1246. p->hash_zero = tmpl->hash_zero;
  1247. p->hash_init = tmpl->hash_init;
  1248. p->auth_type = tmpl->auth_type;
  1249. p->hmac_type = tmpl->hmac_type;
  1250. p->hw_op_hashsz = tmpl->hw_op_hashsz;
  1251. p->digest_size = tmpl->digest_size;
  1252. ahash = &p->alg;
  1253. ahash->init = n2_hash_async_init;
  1254. ahash->update = n2_hash_async_update;
  1255. ahash->final = n2_hash_async_final;
  1256. ahash->finup = n2_hash_async_finup;
  1257. ahash->digest = n2_hash_async_digest;
  1258. halg = &ahash->halg;
  1259. halg->digestsize = tmpl->digest_size;
  1260. base = &halg->base;
  1261. snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
  1262. snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
  1263. base->cra_priority = N2_CRA_PRIORITY;
  1264. base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
  1265. base->cra_blocksize = tmpl->block_size;
  1266. base->cra_ctxsize = sizeof(struct n2_hash_ctx);
  1267. base->cra_module = THIS_MODULE;
  1268. base->cra_init = n2_hash_cra_init;
  1269. base->cra_exit = n2_hash_cra_exit;
  1270. list_add(&p->entry, &ahash_algs);
  1271. err = crypto_register_ahash(ahash);
  1272. if (err) {
  1273. pr_err("%s alg registration failed\n", base->cra_name);
  1274. list_del(&p->entry);
  1275. kfree(p);
  1276. } else {
  1277. pr_info("%s alg registered\n", base->cra_name);
  1278. }
  1279. if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
  1280. err = __n2_register_one_hmac(p);
  1281. return err;
  1282. }
  1283. static int __devinit n2_register_algs(void)
  1284. {
  1285. int i, err = 0;
  1286. mutex_lock(&spu_lock);
  1287. if (algs_registered++)
  1288. goto out;
  1289. for (i = 0; i < NUM_HASH_TMPLS; i++) {
  1290. err = __n2_register_one_ahash(&hash_tmpls[i]);
  1291. if (err) {
  1292. __n2_unregister_algs();
  1293. goto out;
  1294. }
  1295. }
  1296. for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
  1297. err = __n2_register_one_cipher(&cipher_tmpls[i]);
  1298. if (err) {
  1299. __n2_unregister_algs();
  1300. goto out;
  1301. }
  1302. }
  1303. out:
  1304. mutex_unlock(&spu_lock);
  1305. return err;
  1306. }
  1307. static void __exit n2_unregister_algs(void)
  1308. {
  1309. mutex_lock(&spu_lock);
  1310. if (!--algs_registered)
  1311. __n2_unregister_algs();
  1312. mutex_unlock(&spu_lock);
  1313. }
  1314. /* To map CWQ queues to interrupt sources, the hypervisor API provides
  1315. * a devino. This isn't very useful to us because all of the
  1316. * interrupts listed in the device_node have been translated to
  1317. * Linux virtual IRQ cookie numbers.
  1318. *
  1319. * So we have to back-translate, going through the 'intr' and 'ino'
  1320. * property tables of the n2cp MDESC node, matching it with the OF
  1321. 'interrupts' property entries, in order to figure out which
  1322. * devino goes to which already-translated IRQ.
  1323. */
  1324. static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
  1325. unsigned long dev_ino)
  1326. {
  1327. const unsigned int *dev_intrs;
  1328. unsigned int intr;
  1329. int i;
  1330. for (i = 0; i < ip->num_intrs; i++) {
  1331. if (ip->ino_table[i].ino == dev_ino)
  1332. break;
  1333. }
  1334. if (i == ip->num_intrs)
  1335. return -ENODEV;
  1336. intr = ip->ino_table[i].intr;
  1337. dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
  1338. if (!dev_intrs)
  1339. return -ENODEV;
  1340. for (i = 0; i < dev->archdata.num_irqs; i++) {
  1341. if (dev_intrs[i] == intr)
  1342. return i;
  1343. }
  1344. return -ENODEV;
  1345. }
  1346. static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
  1347. const char *irq_name, struct spu_queue *p,
  1348. irq_handler_t handler)
  1349. {
  1350. unsigned long herr;
  1351. int index;
  1352. herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
  1353. if (herr)
  1354. return -EINVAL;
  1355. index = find_devino_index(dev, ip, p->devino);
  1356. if (index < 0)
  1357. return index;
  1358. p->irq = dev->archdata.irqs[index];
  1359. sprintf(p->irq_name, "%s-%d", irq_name, index);
  1360. return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
  1361. p->irq_name, p);
  1362. }
  1363. static struct kmem_cache *queue_cache[2];
  1364. static void *new_queue(unsigned long q_type)
  1365. {
  1366. return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
  1367. }
  1368. static void free_queue(void *p, unsigned long q_type)
  1369. {
  1370. return kmem_cache_free(queue_cache[q_type - 1], p);
  1371. }
static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
}
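
/* Configure the queue with the hypervisor.  The sun4v_ncs_qconf()
 * hypercall appears to need to be issued from one of the CPUs that
 * share the queue, so the current thread is temporarily pinned to
 * p->sharing for the duration of the call and then restored to its
 * original cpumask.
 */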
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	cpumask_var_t old_allowed;
	unsigned long hv_ret;

	if (cpumask_empty(&p->sharing))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_allowed, &current->cpus_allowed);

	set_cpus_allowed_ptr(current, &p->sharing);

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	set_cpus_allowed_ptr(current, old_allowed);

	free_cpumask_var(old_allowed);

	return (hv_ret ? -EINVAL : 0);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}

		spu_queue_destroy(p);

		list_del(&p->list);
		kfree(p);
	}
}
/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
				dev->dev.of_node->full_name);
			return -EINVAL;
		}
		cpu_set(*id, p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}
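
/* Copy the parallel 'intr' and 'ino' property arrays of an MDESC node
 * into a single ino_table, so that find_devino_index() can later match
 * a devino against its interrupt number.  The two arrays must have the
 * same length.
 */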
static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
				   struct spu_mdesc_info *ip)
{
	const u64 *intr, *ino;
	int intr_len, ino_len;
	int i;

	intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
	if (!intr)
		return -ENODEV;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino)
		return -ENODEV;

	if (intr_len != ino_len)
		return -EINVAL;

	ip->num_intrs = intr_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = intr[i];
		b->ino = ino[i];
	}

	return 0;
}
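
/* Locate the 'virtual-device' MDESC node whose name matches node_name
 * and whose 'cfg-handle' equals the first cell of this platform device's
 * OF 'reg' property, then pull its interrupt properties via
 * get_irq_props().
 */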
static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
					  struct platform_device *dev,
					  struct spu_mdesc_info *ip,
					  const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;

		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;

		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int __devinit n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}
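
/* Resources shared by the n2cp and ncp devices: the NCS hypervisor API
 * registration, the queue slab caches, and the per-cpu CWQ/MAU lookup
 * tables.  They are reference counted under spu_lock so that whichever
 * probe runs first sets them up and whichever remove runs last tears
 * them down.
 */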
static int global_ref;

static int __devinit grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}
static struct n2_crypto * __devinit alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	if (np->cwq_info.ino_table) {
		kfree(np->cwq_info.ino_table);
		np->cwq_info.ino_table = NULL;
	}

	kfree(np);
}

static void __devinit n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}
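
/* Probe one n2cp (CWQ) device: allocate driver state, take a reference
 * on the shared global resources, grab the machine description, pull
 * the interrupt properties of the "n2cp" virtual-device node, scan for
 * "cwq" exec-units to set up queues and IRQs, and finally register the
 * crypto algorithms.  Each step unwinds the earlier ones on failure.
 */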
static int __devinit n2_crypto_probe(struct platform_device *dev,
				     const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int __devexit n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}
static struct n2_mau * __devinit alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	if (mp->mau_info.ino_table) {
		kfree(mp->mau_info.ino_table);
		mp->mau_info.ino_table = NULL;
	}

	kfree(mp);
}
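
/* Probe one ncp (MAU) device.  This mirrors n2_crypto_probe(), but scans
 * for "mau" exec-units and fills cpu_to_mau; no crypto algorithms are
 * registered from this path.
 */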
static int __devinit n2_mau_probe(struct platform_device *dev,
				  const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int __devexit n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}
static struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct of_platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.owner = THIS_MODULE,
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove = __devexit_p(n2_crypto_remove),
};

static struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct of_platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.owner = THIS_MODULE,
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove = __devexit_p(n2_mau_remove),
};
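
/* Register the CWQ driver first, then the MAU driver; if the second
 * registration fails, the first is unregistered again, so module init
 * either fully succeeds or leaves nothing behind.
 */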
static int __init n2_init(void)
{
	int err = of_register_platform_driver(&n2_crypto_driver);

	if (!err) {
		err = of_register_platform_driver(&n2_mau_driver);
		if (err)
			of_unregister_platform_driver(&n2_crypto_driver);
	}

	return err;
}

static void __exit n2_exit(void)
{
	of_unregister_platform_driver(&n2_mau_driver);
	of_unregister_platform_driver(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);