/* hifn_795x.c -- driver for the HIFN 7955/7956 crypto accelerator chips. */
  1. /*
  2. * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  3. * All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/mod_devicetable.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/pci.h>
  24. #include <linux/slab.h>
  25. #include <linux/delay.h>
  26. #include <linux/mm.h>
  27. #include <linux/highmem.h>
  28. #include <linux/crypto.h>
  29. #include <crypto/algapi.h>
  30. #include <crypto/des.h>
  31. #include <asm/kmap_types.h>
  32. #undef dprintk
  33. #define HIFN_TEST
  34. //#define HIFN_DEBUG
  35. #ifdef HIFN_DEBUG
  36. #define dprintk(f, a...) printk(f, ##a)
  37. #else
  38. #define dprintk(f, a...) do {} while (0)
  39. #endif
  40. static atomic_t hifn_dev_number;
  41. #define ACRYPTO_OP_DECRYPT 0
  42. #define ACRYPTO_OP_ENCRYPT 1
  43. #define ACRYPTO_OP_HMAC 2
  44. #define ACRYPTO_OP_RNG 3
  45. #define ACRYPTO_MODE_ECB 0
  46. #define ACRYPTO_MODE_CBC 1
  47. #define ACRYPTO_MODE_CFB 2
  48. #define ACRYPTO_MODE_OFB 3
  49. #define ACRYPTO_TYPE_AES_128 0
  50. #define ACRYPTO_TYPE_AES_192 1
  51. #define ACRYPTO_TYPE_AES_256 2
  52. #define ACRYPTO_TYPE_3DES 3
  53. #define ACRYPTO_TYPE_DES 4
  54. #define PCI_VENDOR_ID_HIFN 0x13A3
  55. #define PCI_DEVICE_ID_HIFN_7955 0x0020
  56. #define PCI_DEVICE_ID_HIFN_7956 0x001d
  57. /* I/O region sizes */
  58. #define HIFN_BAR0_SIZE 0x1000
  59. #define HIFN_BAR1_SIZE 0x2000
  60. #define HIFN_BAR2_SIZE 0x8000
  61. /* DMA registres */
  62. #define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */
  63. #define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */
  64. #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */
  65. #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */
  66. #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */
  67. #define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */
  68. #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */
  69. #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */
  70. #define HIFN_CHIP_ID 0x98 /* Chip ID */
  71. /*
  72. * Processing Unit Registers (offset from BASEREG0)
  73. */
  74. #define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
  75. #define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
  76. #define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
  77. #define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
  78. #define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
  79. #define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
  80. #define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
  81. #define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
  82. #define HIFN_0_SPACESIZE 0x20 /* Register space size */
  83. /* Processing Unit Control Register (HIFN_0_PUCTRL) */
  84. #define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
  85. #define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
  86. #define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
  87. #define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
  88. #define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
  89. /* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
  90. #define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
  91. #define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
  92. #define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
  93. #define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
  94. #define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
  95. #define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
  96. #define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
  97. #define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
  98. #define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
  99. #define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
  100. /* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
  101. #define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
  102. #define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
  103. #define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
  104. #define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
  105. #define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
  106. #define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
  107. #define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
  108. #define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
  109. #define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
  110. #define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
  111. #define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
  112. #define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
  113. #define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
  114. #define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
  115. #define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
  116. #define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
  117. #define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
  118. #define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
  119. #define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
  120. #define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
  121. #define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
  122. #define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
  123. #define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
  124. /* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
  125. #define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
  126. #define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
  127. #define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
  128. #define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
  129. #define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
  130. #define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
  131. #define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
  132. #define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
  133. #define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
  134. #define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
  135. /* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
  136. #define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
  137. #define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
  138. #define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
  139. #define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
  140. #define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
  141. #define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
  142. #define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
  143. #define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
  144. #define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
  145. #define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
  146. #define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
  147. #define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
  148. #define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
  149. #define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
  150. #define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
  151. #define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
  152. #define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
  153. /* FIFO Status Register (HIFN_0_FIFOSTAT) */
  154. #define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
  155. #define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
  156. /* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
  157. #define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */
  158. /*
  159. * DMA Interface Registers (offset from BASEREG1)
  160. */
  161. #define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
  162. #define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
  163. #define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
  164. #define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
  165. #define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
  166. #define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
  167. #define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
  168. #define HIFN_1_PLL 0x4c /* 795x: PLL config */
  169. #define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
  170. #define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
  171. #define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
  172. #define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
  173. #define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
  174. #define HIFN_1_REVID 0x98 /* Revision ID */
  175. #define HIFN_1_UNLOCK_SECRET1 0xf4
  176. #define HIFN_1_UNLOCK_SECRET2 0xfc
  177. #define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
  178. #define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
  179. #define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */
  180. #define HIFN_1_PUB_OP 0x308 /* Public Operand */
  181. #define HIFN_1_PUB_STATUS 0x30c /* Public Status */
  182. #define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
  183. #define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
  184. #define HIFN_1_RNG_DATA 0x318 /* RNG data */
  185. #define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
  186. #define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
  187. /* DMA Status and Control Register (HIFN_1_DMA_CSR) */
  188. #define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destinition Ring Control */
  189. #define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
  190. #define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
  191. #define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
  192. #define HIFN_DMACSR_D_ABORT 0x20000000 /* Destinition Ring PCIAbort */
  193. #define HIFN_DMACSR_D_DONE 0x10000000 /* Destinition Ring Done */
  194. #define HIFN_DMACSR_D_LAST 0x08000000 /* Destinition Ring Last */
  195. #define HIFN_DMACSR_D_WAIT 0x04000000 /* Destinition Ring Waiting */
  196. #define HIFN_DMACSR_D_OVER 0x02000000 /* Destinition Ring Overflow */
  197. #define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
  198. #define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
  199. #define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
  200. #define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
  201. #define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
  202. #define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
  203. #define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
  204. #define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
  205. #define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
  206. #define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
  207. #define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
  208. #define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
  209. #define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
  210. #define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
  211. #define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
  212. #define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
  213. #define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
  214. #define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
  215. #define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
  216. #define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
  217. #define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
  218. #define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
  219. #define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
  220. #define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
  221. #define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
  222. #define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
  223. #define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
  224. #define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
  225. #define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
  226. /* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
  227. #define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */
  228. #define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
  229. #define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
  230. #define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
  231. #define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
  232. #define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
  233. #define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
  234. #define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
  235. #define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
  236. #define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
  237. #define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
  238. #define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
  239. #define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
  240. #define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
  241. #define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
  242. #define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
  243. #define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
  244. #define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
  245. #define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
  246. #define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
  247. #define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
  248. #define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
  249. /* DMA Configuration Register (HIFN_1_DMA_CNFG) */
  250. #define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
  251. #define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
  252. #define HIFN_DMACNFG_UNLOCK 0x00000800
  253. #define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
  254. #define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
  255. #define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
  256. #define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
  257. #define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
  258. #define HIFN_PLL_7956 0x00001d18 /* 7956 PLL config value */
  259. /* Public key reset register (HIFN_1_PUB_RESET) */
  260. #define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
  261. /* Public base address register (HIFN_1_PUB_BASE) */
  262. #define HIFN_PUBBASE_ADDR 0x00003fff /* base address */
  263. /* Public operand length register (HIFN_1_PUB_OPLEN) */
  264. #define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */
  265. #define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */
  266. #define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */
  267. #define HIFN_PUBOPLEN_EXP_S 7 /* exponent lenght shift */
  268. #define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */
  269. #define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */
  270. /* Public operation register (HIFN_1_PUB_OP) */
  271. #define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */
  272. #define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */
  273. #define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */
  274. #define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */
  275. #define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */
  276. #define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */
  277. #define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
  278. #define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
  279. #define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
  280. #define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
  281. #define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
  282. #define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
  283. #define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
  284. #define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
  285. #define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
  286. #define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
  287. #define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
  288. #define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
  289. #define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */
  290. #define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */
  291. /* Public status register (HIFN_1_PUB_STATUS) */
  292. #define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
  293. #define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
  294. /* Public interrupt enable register (HIFN_1_PUB_IEN) */
  295. #define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
  296. /* Random number generator config register (HIFN_1_RNG_CONFIG) */
  297. #define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
  298. #define HIFN_NAMESIZE 32
  299. #define HIFN_MAX_RESULT_ORDER 5
  300. #define HIFN_D_CMD_RSIZE 24*4
  301. #define HIFN_D_SRC_RSIZE 80*4
  302. #define HIFN_D_DST_RSIZE 80*4
  303. #define HIFN_D_RES_RSIZE 24*4
  304. #define HIFN_QUEUE_LENGTH HIFN_D_CMD_RSIZE-5
  305. #define AES_MIN_KEY_SIZE 16
  306. #define AES_MAX_KEY_SIZE 32
  307. #define HIFN_DES_KEY_LENGTH 8
  308. #define HIFN_3DES_KEY_LENGTH 24
  309. #define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE
  310. #define HIFN_IV_LENGTH 8
  311. #define HIFN_AES_IV_LENGTH 16
  312. #define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
  313. #define HIFN_MAC_KEY_LENGTH 64
  314. #define HIFN_MD5_LENGTH 16
  315. #define HIFN_SHA1_LENGTH 20
  316. #define HIFN_MAC_TRUNC_LENGTH 12
  317. #define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
  318. #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4)
  319. #define HIFN_USED_RESULT 12
  320. struct hifn_desc
  321. {
  322. volatile u32 l;
  323. volatile u32 p;
  324. };
  325. struct hifn_dma {
  326. struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
  327. struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
  328. struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
  329. struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
  330. u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
  331. u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
  332. u64 test_src, test_dst;
  333. /*
  334. * Our current positions for insertion and removal from the descriptor
  335. * rings.
  336. */
  337. volatile int cmdi, srci, dsti, resi;
  338. volatile int cmdu, srcu, dstu, resu;
  339. int cmdk, srck, dstk, resk;
  340. };
  341. #define HIFN_FLAG_CMD_BUSY (1<<0)
  342. #define HIFN_FLAG_SRC_BUSY (1<<1)
  343. #define HIFN_FLAG_DST_BUSY (1<<2)
  344. #define HIFN_FLAG_RES_BUSY (1<<3)
  345. #define HIFN_FLAG_OLD_KEY (1<<4)
  346. #define HIFN_DEFAULT_ACTIVE_NUM 5
  347. struct hifn_device
  348. {
  349. char name[HIFN_NAMESIZE];
  350. int irq;
  351. struct pci_dev *pdev;
  352. void __iomem *bar[3];
  353. unsigned long result_mem;
  354. dma_addr_t dst;
  355. void *desc_virt;
  356. dma_addr_t desc_dma;
  357. u32 dmareg;
  358. void *sa[HIFN_D_RES_RSIZE];
  359. spinlock_t lock;
  360. void *priv;
  361. u32 flags;
  362. int active, started;
  363. struct delayed_work work;
  364. unsigned long reset;
  365. unsigned long success;
  366. unsigned long prev_success;
  367. u8 snum;
  368. struct crypto_queue queue;
  369. struct list_head alg_list;
  370. };
  371. #define HIFN_D_LENGTH 0x0000ffff
  372. #define HIFN_D_NOINVALID 0x01000000
  373. #define HIFN_D_MASKDONEIRQ 0x02000000
  374. #define HIFN_D_DESTOVER 0x04000000
  375. #define HIFN_D_OVER 0x08000000
  376. #define HIFN_D_LAST 0x20000000
  377. #define HIFN_D_JUMP 0x40000000
  378. #define HIFN_D_VALID 0x80000000
  379. struct hifn_base_command
  380. {
  381. volatile u16 masks;
  382. volatile u16 session_num;
  383. volatile u16 total_source_count;
  384. volatile u16 total_dest_count;
  385. };
  386. #define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */
  387. #define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */
  388. #define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */
  389. #define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */
  390. #define HIFN_BASE_CMD_DECODE 0x2000
  391. #define HIFN_BASE_CMD_SRCLEN_M 0xc000
  392. #define HIFN_BASE_CMD_SRCLEN_S 14
  393. #define HIFN_BASE_CMD_DSTLEN_M 0x3000
  394. #define HIFN_BASE_CMD_DSTLEN_S 12
  395. #define HIFN_BASE_CMD_LENMASK_HI 0x30000
  396. #define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
  397. /*
  398. * Structure to help build up the command data structure.
  399. */
  400. struct hifn_crypt_command
  401. {
  402. volatile u16 masks;
  403. volatile u16 header_skip;
  404. volatile u16 source_count;
  405. volatile u16 reserved;
  406. };
  407. #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
  408. #define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
  409. #define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
  410. #define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
  411. #define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
  412. #define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
  413. #define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
  414. #define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
  415. #define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
  416. #define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
  417. #define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
  418. #define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
  419. #define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
  420. #define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
  421. #define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
  422. #define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
  423. #define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
  424. #define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
  425. #define HIFN_CRYPT_CMD_SRCLEN_S 14
  426. /*
  427. * Structure to help build up the command data structure.
  428. */
  429. struct hifn_mac_command
  430. {
  431. volatile u16 masks;
  432. volatile u16 header_skip;
  433. volatile u16 source_count;
  434. volatile u16 reserved;
  435. };
  436. #define HIFN_MAC_CMD_ALG_MASK 0x0001
  437. #define HIFN_MAC_CMD_ALG_SHA1 0x0000
  438. #define HIFN_MAC_CMD_ALG_MD5 0x0001
  439. #define HIFN_MAC_CMD_MODE_MASK 0x000c
  440. #define HIFN_MAC_CMD_MODE_HMAC 0x0000
  441. #define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
  442. #define HIFN_MAC_CMD_MODE_HASH 0x0008
  443. #define HIFN_MAC_CMD_MODE_FULL 0x0004
  444. #define HIFN_MAC_CMD_TRUNC 0x0010
  445. #define HIFN_MAC_CMD_RESULT 0x0020
  446. #define HIFN_MAC_CMD_APPEND 0x0040
  447. #define HIFN_MAC_CMD_SRCLEN_M 0xc000
  448. #define HIFN_MAC_CMD_SRCLEN_S 14
  449. /*
  450. * MAC POS IPsec initiates authentication after encryption on encodes
  451. * and before decryption on decodes.
  452. */
  453. #define HIFN_MAC_CMD_POS_IPSEC 0x0200
  454. #define HIFN_MAC_CMD_NEW_KEY 0x0800
/*
 * Compression sub-command.  Declared for completeness; the compression
 * engine is not used by this driver's data path.
 */
struct hifn_comp_command
{
	volatile u16 masks;		/* HIFN_COMP_CMD_* bits */
	volatile u16 header_skip;
	volatile u16 source_count;	/* bits 15:0 of compression byte count */
	volatile u16 reserved;
};

#define HIFN_COMP_CMD_SRCLEN_M 0xc000 /* bits 17:16 of byte count */
#define HIFN_COMP_CMD_SRCLEN_S 14
#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */
#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */
#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */
#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */
#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */
#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */
#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */
#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */
/*
 * Base result descriptor written back by the chip into the result ring.
 */
struct hifn_base_result
{
	volatile u16 flags;		/* HIFN_BASE_RES_* status bits */
	volatile u16 session;
	volatile u16 src_cnt; /* 15:0 of source count */
	volatile u16 dst_cnt; /* 15:0 of dest count */
};

#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */
#define HIFN_BASE_RES_SRCLEN_S 14
#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */
#define HIFN_BASE_RES_DSTLEN_S 12
/*
 * Compression engine result (unused by this driver's data path).
 */
struct hifn_comp_result
{
	volatile u16 flags;		/* HIFN_COMP_RES_* status bits */
	volatile u16 crc;
};

#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
#define HIFN_COMP_RES_LCB_S 8
#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */
#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */
#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */
/*
 * MAC engine result.
 */
struct hifn_mac_result
{
	volatile u16 flags;		/* HIFN_MAC_RES_* status bits */
	volatile u16 reserved;
	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
};

#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */
#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */
/*
 * Crypt engine result.
 */
struct hifn_crypt_result
{
	volatile u16 flags;		/* HIFN_CRYPT_RES_* status bits */
	volatile u16 reserved;
};

#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */

/*
 * DMA descriptor-poll tuning; both may be overridden at build time.
 * They are merged into HIFN_1_DMA_CNFG by hifn_init_registers().
 */
#ifndef HIFN_POLL_FREQUENCY
#define HIFN_POLL_FREQUENCY 0x1
#endif

#ifndef HIFN_POLL_SCALAR
#define HIFN_POLL_SCALAR 0x0
#endif

#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
/*
 * Per-algorithm registration record: links a crypto_alg instance that is
 * registered with the crypto API back to the owning device.
 */
struct hifn_crypto_alg
{
	struct list_head entry;		/* membership in the device's alg list */
	struct crypto_alg alg;		/* embedded crypto API descriptor */
	struct hifn_device *dev;	/* device servicing this algorithm */
};

#define ASYNC_SCATTERLIST_CACHE 16

#define ASYNC_FLAGS_MISALIGNED (1<<0)
/*
 * Bounce-buffer state for misaligned requests: up to
 * ASYNC_SCATTERLIST_CACHE freshly allocated pages used to realign
 * scatterlist entries before they are handed to the hardware.
 */
struct ablkcipher_walk
{
	struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];	/* bounce pages */
	u32 flags;	/* ASYNC_FLAGS_MISALIGNED when bouncing is needed */
	int num;	/* number of valid entries in cache[] */
};
/*
 * Per-tfm (and per-selftest) cipher context: key/IV material plus the
 * bounce-walk state and the count of outstanding DMA chunks.
 */
struct hifn_context
{
	u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;	/* key copy and caller's IV */
	struct hifn_device *dev;
	unsigned int keysize, ivsize;		/* sizes in bytes */
	u8 op, type, mode, unused;		/* ACRYPTO_OP_/TYPE_/MODE_* */
	struct ablkcipher_walk walk;		/* realignment bounce pages */
	atomic_t sg_num;			/* chunks still in flight */
};

#define crypto_alg_to_hifn(alg) container_of(alg, struct hifn_crypto_alg, alg)
  540. static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
  541. {
  542. u32 ret;
  543. ret = readl((char *)(dev->bar[0]) + reg);
  544. return ret;
  545. }
  546. static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
  547. {
  548. u32 ret;
  549. ret = readl((char *)(dev->bar[1]) + reg);
  550. return ret;
  551. }
  552. static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
  553. {
  554. writel(val, (char *)(dev->bar[0]) + reg);
  555. }
  556. static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
  557. {
  558. writel(val, (char *)(dev->bar[1]) + reg);
  559. }
  560. static void hifn_wait_puc(struct hifn_device *dev)
  561. {
  562. int i;
  563. u32 ret;
  564. for (i=10000; i > 0; --i) {
  565. ret = hifn_read_0(dev, HIFN_0_PUCTRL);
  566. if (!(ret & HIFN_PUCTRL_RESET))
  567. break;
  568. udelay(1);
  569. }
  570. if (!i)
  571. dprintk("%s: Failed to reset PUC unit.\n", dev->name);
  572. }
  573. static void hifn_reset_puc(struct hifn_device *dev)
  574. {
  575. hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
  576. hifn_wait_puc(dev);
  577. }
  578. static void hifn_stop_device(struct hifn_device *dev)
  579. {
  580. hifn_write_1(dev, HIFN_1_DMA_CSR,
  581. HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
  582. HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
  583. hifn_write_0(dev, HIFN_0_PUIER, 0);
  584. hifn_write_1(dev, HIFN_1_DMA_IER, 0);
  585. }
/*
 * Reset the DMA engine and (optionally) perform a full master reset,
 * then bring the PU unit back out of reset.  The exact sequence of
 * register writes and mdelay()s is part of the hardware bring-up
 * protocol — do not reorder.
 */
static void hifn_reset_dma(struct hifn_device *dev, int full)
{
	hifn_stop_device(dev);

	/*
	 * Setting poll frequency and others to 0.
	 */
	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	mdelay(1);

	/*
	 * Reset DMA.
	 */
	if (full) {
		/* Full reset: deassert master and DMA reset together. */
		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		mdelay(1);
	} else {
		/* Partial reset: keep master reset asserted, recycle PUC. */
		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
				HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(dev);
	}

	/* Re-assert both resets and bring the PU unit up again. */
	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_reset_puc(dev);
}
  610. static u32 hifn_next_signature(u_int32_t a, u_int cnt)
  611. {
  612. int i;
  613. u32 v;
  614. for (i = 0; i < cnt; i++) {
  615. /* get the parity */
  616. v = a & 0x80080125;
  617. v ^= v >> 16;
  618. v ^= v >> 8;
  619. v ^= v >> 4;
  620. v ^= v >> 2;
  621. v ^= v >> 1;
  622. a = (v & 1) ^ (a << 1);
  623. }
  624. return a;
  625. }
/*
 * Vendor/device -> unlock-key table.  hifn_enable_crypto() matches the
 * probed PCI IDs against this table and feeds card_id[0..11] through
 * hifn_next_signature() to unlock the crypto engine; both supported
 * chips (7955/7956) use an all-zero key.
 */
static struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_ID_HIFN,
		PCI_DEVICE_ID_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
	{
		PCI_VENDOR_ID_HIFN,
		PCI_DEVICE_ID_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}
};
  644. static int hifn_init_pubrng(struct hifn_device *dev)
  645. {
  646. int i;
  647. hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
  648. HIFN_PUBRST_RESET);
  649. for (i=100; i > 0; --i) {
  650. mdelay(1);
  651. if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
  652. break;
  653. }
  654. if (!i)
  655. dprintk("Chip %s: Failed to initialise public key engine.\n",
  656. dev->name);
  657. else {
  658. hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
  659. dev->dmareg |= HIFN_DMAIER_PUBDONE;
  660. hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
  661. dprintk("Chip %s: Public key engine has been sucessfully "
  662. "initialised.\n", dev->name);
  663. }
  664. /*
  665. * Enable RNG engine.
  666. */
  667. hifn_write_1(dev, HIFN_1_RNG_CONFIG,
  668. hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
  669. dprintk("Chip %s: RNG engine has been successfully initialised.\n",
  670. dev->name);
  671. return 0;
  672. }
/*
 * Unlock the crypto engine.  Looks up the 12-byte card key for the
 * probed PCI IDs in pci2id[], then clocks the HIFN_1_UNLOCK_SECRET1/2
 * signature sequence into the chip (one hifn_next_signature() step per
 * key byte + 0x101).  The write/mdelay cadence is part of the unlock
 * protocol — do not reorder.  Returns 0 on success, -ENODEV for an
 * unknown card.
 */
static int hifn_enable_crypto(struct hifn_device *dev)
{
	u32 dmacfg, addr;
	char *offtbl = NULL;
	int i;

	/* Find the unlock key matching this vendor/device pair. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == dev->pdev->vendor &&
				pci2id[i].pci_prod == dev->pdev->device) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
		dprintk("Chip %s: Unknown card!\n", dev->name);
		return -ENODEV;
	}

	/* Save DMA config and enter unlocked reset mode for the handshake. */
	dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
	hifn_write_1(dev, HIFN_1_DMA_CNFG,
			HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	mdelay(1);
	addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
	mdelay(1);
	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
	mdelay(1);

	/* Feed the 12 derived signature words into SECRET2. */
	for (i=0; i<12; ++i) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);

		mdelay(1);
	}

	/* Restore the original DMA configuration. */
	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);

	dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));

	return 0;
}
  707. static void hifn_init_dma(struct hifn_device *dev)
  708. {
  709. struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
  710. u32 dptr = dev->desc_dma;
  711. int i;
  712. for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
  713. dma->cmdr[i].p = __cpu_to_le32(dptr +
  714. offsetof(struct hifn_dma, command_bufs[i][0]));
  715. for (i=0; i<HIFN_D_RES_RSIZE; ++i)
  716. dma->resr[i].p = __cpu_to_le32(dptr +
  717. offsetof(struct hifn_dma, result_bufs[i][0]));
  718. /*
  719. * Setup LAST descriptors.
  720. */
  721. dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
  722. offsetof(struct hifn_dma, cmdr[0]));
  723. dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
  724. offsetof(struct hifn_dma, srcr[0]));
  725. dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
  726. offsetof(struct hifn_dma, dstr[0]));
  727. dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
  728. offsetof(struct hifn_dma, resr[0]));
  729. dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
  730. dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
  731. dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
  732. }
/*
 * Program the chip's runtime configuration: PU control/FIFO thresholds,
 * the four DMA ring base addresses, the DMA control/interrupt masks,
 * the PU configuration word, the PLL, and finally the DMA config with
 * the descriptor poll frequency/scalar.
 *
 * NOTE(review): the ring addresses are passed through __cpu_to_le32()
 * before hifn_write_1(), which itself uses writel() — on big-endian
 * hosts that looks like a double byte-swap; confirm on BE hardware.
 */
static void hifn_init_registers(struct hifn_device *dev)
{
	u32 dptr = dev->desc_dma;

	/* Initialization magic... */
	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, cmdr[0])));
	hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, srcr[0])));
	hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, dstr[0])));
	hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, resr[0])));

	mdelay(2);
#if 0
	hifn_write_1(dev, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    HIFN_DMACSR_PUBDONE);
#else
	/* Enable all four DMA engines and acknowledge all status bits. */
	hifn_write_1(dev, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    HIFN_DMACSR_PUBDONE);
#endif
	hifn_read_1(dev, HIFN_1_DMA_CSR);

	/* Unmask the interrupts we service; C_WAIT starts masked and is
	 * toggled at runtime by hifn_setup_crypto_command(). */
	dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE;
	dev->dmareg &= ~HIFN_DMAIER_C_WAIT;

	hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	hifn_read_1(dev, HIFN_1_DMA_IER);
#if 0
	hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    HIFN_PUCNFG_DRAM);
#else
	/* Raw value equivalent to the symbolic configuration above. */
	hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
#endif
	hifn_write_1(dev, HIFN_1_PLL, HIFN_PLL_7956);

	hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
  802. static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
  803. unsigned dlen, unsigned slen, u16 mask, u8 snum)
  804. {
  805. struct hifn_base_command *base_cmd;
  806. u8 *buf_pos = buf;
  807. base_cmd = (struct hifn_base_command *)buf_pos;
  808. base_cmd->masks = __cpu_to_le16(mask);
  809. base_cmd->total_source_count =
  810. __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
  811. base_cmd->total_dest_count =
  812. __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
  813. dlen >>= 16;
  814. slen >>= 16;
  815. base_cmd->session_num = __cpu_to_le16(snum |
  816. ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
  817. ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
  818. return sizeof(struct hifn_base_command);
  819. }
/*
 * Append a crypt sub-command at @buf, followed by the raw key and IV
 * material when present.  Also bumps the command-ring usage counter and
 * unmasks the C_WAIT interrupt once more than one command is queued.
 * Returns the total number of bytes written.
 *
 * NOTE(review): source_count is filled from @dlen and @slen is never
 * used — the only caller (hifn_setup_dma) passes dlen == slen == nbytes,
 * so it makes no difference today, but confirm against the datasheet
 * before changing either.
 */
static int hifn_setup_crypto_command(struct hifn_device *dev,
		u8 *buf, unsigned dlen, unsigned slen,
		u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	struct hifn_crypt_command *cry_cmd;
	u8 *buf_pos = buf;
	u16 cmd_len;

	cry_cmd = (struct hifn_crypt_command *)buf_pos;

	cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
	dlen >>= 16;
	cry_cmd->masks = __cpu_to_le16(mode |
			((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
			 HIFN_CRYPT_CMD_SRCLEN_M));
	cry_cmd->header_skip = 0;
	cry_cmd->reserved = 0;

	buf_pos += sizeof(struct hifn_crypt_command);

	/* With more than one command outstanding, listen for C_WAIT. */
	dma->cmdu++;
	if (dma->cmdu > 1) {
		dev->dmareg |= HIFN_DMAIER_C_WAIT;
		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	}

	if (keylen) {
		memcpy(buf_pos, key, keylen);
		buf_pos += keylen;
	}
	if (ivsize) {
		memcpy(buf_pos, iv, ivsize);
		buf_pos += ivsize;
	}

	cmd_len = buf_pos - buf;

	return cmd_len;
}
/*
 * Map one source page for device reads and publish it in the source
 * ring.  When the index reaches the end of the ring, the extra terminal
 * slot is armed as a JUMP descriptor back to the head.  Kicks the source
 * DMA engine if it is not already running.  Returns @size.
 *
 * NOTE(review): the pci_map_page() mapping is not visibly unmapped in
 * this chunk — verify completion-path cleanup elsewhere in the file.
 */
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
		unsigned int offset, unsigned int size)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int idx;
	dma_addr_t addr;

	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);

	idx = dma->srci;

	dma->srcr[idx].p = __cpu_to_le32(addr);
	dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;

	if (++idx == HIFN_D_SRC_RSIZE) {
		/* Arm the wrap (JUMP) descriptor and restart at slot 0. */
		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
				HIFN_D_JUMP |
				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
		idx = 0;
	}

	dma->srci = idx;
	dma->srcu++;

	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		dev->flags |= HIFN_FLAG_SRC_BUSY;
	}

	return size;
}
/*
 * Publish the next result-ring descriptor (HIFN_USED_RESULT bytes) and
 * kick the result DMA engine if idle.  On wrap, the extra terminal slot
 * is armed as a JUMP descriptor back to the ring head.
 */
static void hifn_setup_res_desc(struct hifn_device *dev)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
			HIFN_D_VALID | HIFN_D_LAST);
	/*
	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
	 *					HIFN_D_LAST | HIFN_D_NOINVALID);
	 */

	if (++dma->resi == HIFN_D_RES_RSIZE) {
		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
		dma->resi = 0;
	}

	dma->resu++;

	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		dev->flags |= HIFN_FLAG_RES_BUSY;
	}
}
/*
 * Map one destination page for device writes and publish it in the
 * destination ring; arm the wrap (JUMP) descriptor on ring end and kick
 * the destination DMA engine if idle.
 *
 * NOTE(review): as with the source path, the pci_map_page() mapping is
 * not visibly unmapped in this chunk — verify the completion path.
 */
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
		unsigned offset, unsigned size)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int idx;
	dma_addr_t addr;

	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);

	idx = dma->dsti;
	dma->dstr[idx].p = __cpu_to_le32(addr);
	dma->dstr[idx].l = __cpu_to_le32(size |	HIFN_D_VALID |
			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
				HIFN_D_LAST | HIFN_D_NOINVALID);
		idx = 0;
	}
	dma->dsti = idx;
	dma->dstu++;

	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		dev->flags |= HIFN_FLAG_DST_BUSY;
	}
}
/*
 * Queue one crypto operation on the chip: a source descriptor for
 * (@spage, @soff), a command block (base command + optional crypt
 * sub-command with key/IV), a destination descriptor for (@dpage,
 * @doff) and a result descriptor.  @priv (the originating request) is
 * remembered in dev->sa[] under the result-ring slot index so the
 * completion path can find it.  Returns 0 or -EINVAL for an
 * unsupported op/mode/type/keysize combination.
 */
static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
		struct hifn_context *ctx)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int cmd_len, sa_idx;
	u8 *buf, *buf_pos;
	u16 mask;

	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);

	/* Remember the result slot this request will complete in. */
	sa_idx = dma->resi;

	hifn_setup_src_desc(dev, spage, soff, nbytes);

	buf_pos = buf = dma->command_bufs[dma->cmdi];

	/* Translate the generic operation into base-command enable bits. */
	mask = 0;
	switch (ctx->op) {
		case ACRYPTO_OP_DECRYPT:
			mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
			break;
		case ACRYPTO_OP_ENCRYPT:
			mask = HIFN_BASE_CMD_CRYPT;
			break;
		case ACRYPTO_OP_HMAC:
			mask = HIFN_BASE_CMD_MAC;
			break;
		default:
			goto err_out;
	}

	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
			nbytes, mask, dev->snum);

	if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
		u16 md = 0;

		if (ctx->keysize)
			md |= HIFN_CRYPT_CMD_NEW_KEY;
		if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
			md |= HIFN_CRYPT_CMD_NEW_IV;

		/* Chaining mode. */
		switch (ctx->mode) {
			case ACRYPTO_MODE_ECB:
				md |= HIFN_CRYPT_CMD_MODE_ECB;
				break;
			case ACRYPTO_MODE_CBC:
				md |= HIFN_CRYPT_CMD_MODE_CBC;
				break;
			case ACRYPTO_MODE_CFB:
				md |= HIFN_CRYPT_CMD_MODE_CFB;
				break;
			case ACRYPTO_MODE_OFB:
				md |= HIFN_CRYPT_CMD_MODE_OFB;
				break;
			default:
				goto err_out;
		}

		/* Algorithm + key size; each case validates ctx->keysize. */
		switch (ctx->type) {
			case ACRYPTO_TYPE_AES_128:
				if (ctx->keysize != 16)
					goto err_out;
				md |= HIFN_CRYPT_CMD_KSZ_128 |
					HIFN_CRYPT_CMD_ALG_AES;
				break;
			case ACRYPTO_TYPE_AES_192:
				if (ctx->keysize != 24)
					goto err_out;
				md |= HIFN_CRYPT_CMD_KSZ_192 |
					HIFN_CRYPT_CMD_ALG_AES;
				break;
			case ACRYPTO_TYPE_AES_256:
				if (ctx->keysize != 32)
					goto err_out;
				md |= HIFN_CRYPT_CMD_KSZ_256 |
					HIFN_CRYPT_CMD_ALG_AES;
				break;
			case ACRYPTO_TYPE_3DES:
				if (ctx->keysize != 24)
					goto err_out;
				md |= HIFN_CRYPT_CMD_ALG_3DES;
				break;
			case ACRYPTO_TYPE_DES:
				if (ctx->keysize != 8)
					goto err_out;
				md |= HIFN_CRYPT_CMD_ALG_DES;
				break;
			default:
				goto err_out;
		}

		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
				nbytes, nbytes, ctx->key, ctx->keysize,
				ctx->iv, ctx->ivsize, md);
	}

	dev->sa[sa_idx] = priv;

	/* Publish the command descriptor; on wrap, arm the JUMP slot. */
	cmd_len = buf_pos - buf;
	dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
			HIFN_D_LAST | HIFN_D_MASKDONEIRQ);

	if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
				HIFN_D_VALID | HIFN_D_LAST |
				HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
		dma->cmdi = 0;
	} else
		dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);

	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		dev->flags |= HIFN_FLAG_CMD_BUSY;
	}

	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
	hifn_setup_res_desc(dev);

	return 0;

err_out:
	return -EINVAL;
}
  1030. static int ablkcipher_walk_init(struct ablkcipher_walk *w,
  1031. int num, gfp_t gfp_flags)
  1032. {
  1033. int i;
  1034. num = min(ASYNC_SCATTERLIST_CACHE, num);
  1035. sg_init_table(w->cache, num);
  1036. w->num = 0;
  1037. for (i=0; i<num; ++i) {
  1038. struct page *page = alloc_page(gfp_flags);
  1039. struct scatterlist *s;
  1040. if (!page)
  1041. break;
  1042. s = &w->cache[i];
  1043. sg_set_page(s, page, PAGE_SIZE, 0);
  1044. w->num++;
  1045. }
  1046. return i;
  1047. }
  1048. static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
  1049. {
  1050. int i;
  1051. for (i=0; i<w->num; ++i) {
  1052. struct scatterlist *s = &w->cache[i];
  1053. __free_page(sg_page(s));
  1054. s->length = 0;
  1055. }
  1056. w->num = 0;
  1057. }
  1058. static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
  1059. unsigned int size, unsigned int *nbytesp)
  1060. {
  1061. unsigned int copy, drest = *drestp, nbytes = *nbytesp;
  1062. int idx = 0;
  1063. void *saddr;
  1064. if (drest < size || size > nbytes)
  1065. return -EINVAL;
  1066. while (size) {
  1067. copy = min(drest, src->length);
  1068. saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
  1069. memcpy(daddr, saddr + src->offset, copy);
  1070. kunmap_atomic(saddr, KM_SOFTIRQ1);
  1071. size -= copy;
  1072. drest -= copy;
  1073. nbytes -= copy;
  1074. daddr += copy;
  1075. dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
  1076. __func__, copy, size, drest, nbytes);
  1077. src++;
  1078. idx++;
  1079. }
  1080. *nbytesp = nbytes;
  1081. *drestp = drest;
  1082. return idx;
  1083. }
/*
 * Walk the request's src/dst scatterlists.  Aligned entries are passed
 * through untouched; any entry whose length/offset violates the tfm's
 * blocksize/alignmask is copied into the preallocated bounce pages in
 * w->cache so the hardware only ever sees aligned buffers.  Returns the
 * number of chunks produced, or a negative errno.
 */
static int ablkcipher_walk(struct ablkcipher_request *req,
		struct ablkcipher_walk *w)
{
	unsigned blocksize =
		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
	unsigned alignmask =
		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
	struct scatterlist *src, *dst, *t;
	void *daddr;
	unsigned int nbytes = req->nbytes, offset, copy, diff;
	int idx, tidx, err;

	tidx = idx = 0;
	offset = 0;
	while (nbytes) {
		/* Out of bounce pages while still misaligned: give up. */
		if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
			return -EINVAL;

		src = &req->src[idx];
		dst = &req->dst[idx];

		dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
				"blocksize: %u, nbytes: %u.\n",
				__func__, src->length, dst->length, src->offset,
				dst->offset, offset, blocksize, nbytes);

		if (src->length & (blocksize - 1) ||
				src->offset & (alignmask - 1) ||
				dst->length & (blocksize - 1) ||
				dst->offset & (alignmask - 1) ||
				offset) {
			/* Misaligned entry: bounce it through a cache page. */
			unsigned slen = src->length - offset;
			unsigned dlen = PAGE_SIZE;

			t = &w->cache[idx];

			daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
			err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
			if (err < 0)
				goto err_out_unmap;

			idx += err;

			copy = slen & ~(blocksize - 1);
			diff = slen & (blocksize - 1);

			if (dlen < nbytes) {
				/*
				 * Destination page does not have enough space
				 * to put there additional blocksized chunk,
				 * so we mark that page as containing only
				 * blocksize aligned chunks:
				 * 	t->length = (slen & ~(blocksize - 1));
				 * and increase number of bytes to be processed
				 * in next chunk:
				 * 	nbytes += diff;
				 */
				nbytes += diff;

				/*
				 * Temporary of course...
				 * Kick author if you will catch this one.
				 */
				printk(KERN_ERR "%s: dlen: %u, nbytes: %u,"
					"slen: %u, offset: %u.\n",
					__func__, dlen, nbytes, slen, offset);
				printk(KERN_ERR "%s: please contact author to fix this "
					"issue, generally you should not catch "
					"this path under any condition but who "
					"knows how did you use crypto code.\n"
					"Thank you.\n",	__func__);

				BUG();
			} else {
				/* Pull the trailing partial block from the
				 * next source entry into the same page. */
				copy += diff + nbytes;

				src = &req->src[idx];

				err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
				if (err < 0)
					goto err_out_unmap;

				idx += err;
			}

			t->length = copy;
			t->offset = offset;

			kunmap_atomic(daddr, KM_SOFTIRQ0);
		} else {
			nbytes -= src->length;
			idx++;
		}

		tidx++;
	}

	return tidx;

err_out_unmap:
	kunmap_atomic(daddr, KM_SOFTIRQ0);
	return err;
}
/*
 * Prepare and queue one ablkcipher request: scan src/dst for alignment,
 * allocate bounce pages if needed, split the request into per-entry DMA
 * chunks and hand each chunk to hifn_setup_dma() under the device lock.
 * Returns 0, -EAGAIN when the device queue is full, or a negative errno.
 *
 * NOTE(review): ablkcipher_walk() can return a negative error, which is
 * stored in ctx->sg_num and added to dev->started without a check —
 * confirm whether that path is reachable and guard it if so.
 */
static int hifn_setup_session(struct ablkcipher_request *req)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev = ctx->dev;
	struct page *spage, *dpage;
	unsigned long soff, doff, flags;
	unsigned int nbytes = req->nbytes, idx = 0, len;
	int err = -EINVAL, sg_num;
	struct scatterlist *src, *dst, *t;
	unsigned blocksize =
		crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
	unsigned alignmask =
		crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));

	/* Non-ECB modes need an IV of non-zero size. */
	if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
		goto err_out_exit;

	ctx->walk.flags = 0;

	/* First pass: detect any misaligned src/dst entry. */
	while (nbytes) {
		src = &req->src[idx];
		dst = &req->dst[idx];

		if (src->length & (blocksize - 1) ||
				src->offset & (alignmask - 1) ||
				dst->length & (blocksize - 1) ||
				dst->offset & (alignmask - 1)) {
			ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
		}

		nbytes -= src->length;
		idx++;
	}

	if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
		err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	nbytes = req->nbytes;
	idx = 0;

	sg_num = ablkcipher_walk(req, &ctx->walk);

	atomic_set(&ctx->sg_num, sg_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
		err = -EAGAIN;
		goto err_out;
	}

	dev->snum++;
	dev->started += sg_num;

	/* Second pass: queue one DMA chunk per (possibly bounced) entry. */
	while (nbytes) {
		src = &req->src[idx];
		dst = &req->dst[idx];
		t = &ctx->walk.cache[idx];

		if (t->length) {
			/* Bounced entry: device reads and writes the page. */
			spage = dpage = sg_page(t);
			soff = doff = 0;
			len = t->length;
		} else {
			spage = sg_page(src);
			soff = src->offset;

			dpage = sg_page(dst);
			doff = dst->offset;

			len = dst->length;
		}

		idx++;

		err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
				req, ctx);
		if (err)
			goto err_out;

		nbytes -= len;
	}

	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;

err_out:
	spin_unlock_irqrestore(&dev->lock, flags);
err_out_exit:
	if (err && printk_ratelimit())
		dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
				"type: %u, err: %d.\n",
			dev->name, ctx->iv, ctx->ivsize,
			ctx->key, ctx->keysize,
			ctx->mode, ctx->op, ctx->type, err);

	return err;
}
/*
 * Self-test: run AES-128 ECB over a 16-byte all-zero buffer in place
 * and compare the result against the FIPS known-answer vector.  Returns
 * 0 on success, -1 on mismatch or setup failure.
 *
 * NOTE(review): @src lives on the stack and is handed to the device via
 * virt_to_page() — this relies on the stack being in the direct-mapped
 * region, and completion is awaited with a fixed msleep(200) rather
 * than a real synchronisation primitive.  Confirm this is acceptable
 * for a probe-time-only test.
 */
static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
{
	int n, err;
	u8 src[16];
	struct hifn_context ctx;
	u8 fips_aes_ecb_from_zero[16] = {
		0x66, 0xE9, 0x4B, 0xD4,
		0xEF, 0x8A, 0x2C, 0x3B,
		0x88, 0x4C, 0xFA, 0x59,
		0xCA, 0x34, 0x2B, 0x2E};

	memset(src, 0, sizeof(src));
	memset(ctx.key, 0, sizeof(ctx.key));

	ctx.dev = dev;
	ctx.keysize = 16;
	ctx.ivsize = 0;
	ctx.iv = NULL;
	ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
	ctx.mode = ACRYPTO_MODE_ECB;
	ctx.type = ACRYPTO_TYPE_AES_128;
	atomic_set(&ctx.sg_num, 1);

	err = hifn_setup_dma(dev,
			virt_to_page(src), offset_in_page(src),
			virt_to_page(src), offset_in_page(src),
			sizeof(src), NULL, &ctx);
	if (err)
		goto err_out;

	/* Crude wait for the hardware to complete the operation. */
	msleep(200);

	dprintk("%s: decoded: ", dev->name);
	for (n=0; n<sizeof(src); ++n)
		dprintk("%02x ", src[n]);
	dprintk("\n");
	dprintk("%s: FIPS   : ", dev->name);
	for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
		dprintk("%02x ", fips_aes_ecb_from_zero[n]);
	dprintk("\n");

	if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
		printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
				"passed.\n", dev->name);
		return 0;
	}

err_out:
	printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
	return -1;
}
  1292. static int hifn_start_device(struct hifn_device *dev)
  1293. {
  1294. int err;
  1295. hifn_reset_dma(dev, 1);
  1296. err = hifn_enable_crypto(dev);
  1297. if (err)
  1298. return err;
  1299. hifn_reset_puc(dev);
  1300. hifn_init_dma(dev);
  1301. hifn_init_registers(dev);
  1302. hifn_init_pubrng(dev);
  1303. return 0;
  1304. }
  1305. static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
  1306. struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
  1307. {
  1308. unsigned int srest = *srestp, nbytes = *nbytesp, copy;
  1309. void *daddr;
  1310. int idx = 0;
  1311. if (srest < size || size > nbytes)
  1312. return -EINVAL;
  1313. while (size) {
  1314. copy = min(dst->length, srest);
  1315. daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
  1316. memcpy(daddr + dst->offset + offset, saddr, copy);
  1317. kunmap_atomic(daddr, KM_IRQ0);
  1318. nbytes -= copy;
  1319. size -= copy;
  1320. srest -= copy;
  1321. saddr += copy;
  1322. offset = 0;
  1323. dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
  1324. __func__, copy, size, srest, nbytes);
  1325. dst++;
  1326. idx++;
  1327. }
  1328. *nbytesp = nbytes;
  1329. *srestp = srest;
  1330. return idx;
  1331. }
/*
 * Per-request completion handler. Decrements the device's in-flight
 * counter and, once every scatterlist chunk of the request has
 * completed (ctx->sg_num drops to zero), copies bounce-buffer data
 * back into the caller's destination for misaligned requests and
 * finally signals completion (with @error) to the crypto API.
 */
static void hifn_process_ready(struct ablkcipher_request *req, int error)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev;

	dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);

	dev = ctx->dev;
	dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
		__func__, req, dev->started, atomic_read(&ctx->sg_num));

	/* More completions than submissions would indicate ring corruption. */
	if (--dev->started < 0)
		BUG();

	if (atomic_dec_and_test(&ctx->sg_num)) {
		unsigned int nbytes = req->nbytes;
		int idx = 0, err;
		struct scatterlist *dst, *t;
		void *saddr;

		if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
			/*
			 * The request ran through aligned bounce buffers in
			 * ctx->walk.cache; copy each cached chunk back into
			 * the corresponding entries of req->dst.
			 */
			while (nbytes) {
				t = &ctx->walk.cache[idx];
				dst = &req->dst[idx];

				dprintk("\n%s: sg_page(t): %p, t->length: %u, "
					"sg_page(dst): %p, dst->length: %u, "
					"nbytes: %u.\n",
					__func__, sg_page(t), t->length,
					sg_page(dst), dst->length, nbytes);

				if (!t->length) {
					/* This chunk was processed in place. */
					nbytes -= dst->length;
					idx++;
					continue;
				}

				saddr = kmap_atomic(sg_page(t), KM_IRQ1);

				err = ablkcipher_get(saddr, &t->length, t->offset,
						dst, nbytes, &nbytes);
				if (err < 0) {
					kunmap_atomic(saddr, KM_IRQ1);
					break;
				}

				idx += err;
				kunmap_atomic(saddr, KM_IRQ1);
			}

			ablkcipher_walk_exit(&ctx->walk);
		}

		req->base.complete(&req->base, error);
	}
}
/*
 * Scan the result descriptor ring for entries the engine has finished
 * (HIFN_D_VALID cleared by hardware) that still have a pending request
 * attached, and complete those requests with @error. Also reports
 * destination/over-run conditions flagged in the descriptors.
 *
 * NOTE(review): callers appear to hold dev->lock (see hifn_work());
 * confirm before adding new call sites.
 */
static void hifn_check_for_completion(struct hifn_device *dev, int error)
{
	int i;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
		struct hifn_desc *d = &dma->resr[i];

		if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
			dev->success++;
			dev->reset = 0;	/* forward progress: clear watchdog strike count */
			hifn_process_ready(dev->sa[i], error);
			dev->sa[i] = NULL;
		}

		if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
			if (printk_ratelimit())
				printk("%s: overflow detected [d: %u, o: %u] "
						"at %d resr: l: %08x, p: %08x.\n",
					dev->name,
					!!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
					!!(d->l & __cpu_to_le32(HIFN_D_OVER)),
					i, d->l, d->p);
	}
}
/*
 * Advance the "k" (cleanup) index of each of the four descriptor rings
 * past entries the hardware has completed (HIFN_D_VALID cleared),
 * decrementing the matching "u" (used) counter.
 *
 * NOTE(review): the result and command loops wrap at RSIZE + 1 and skip
 * the u-decrement at index == RSIZE, while the source and destination
 * loops wrap at RSIZE — presumably the extra slot holds a jump
 * descriptor on the res/cmd rings; confirm against hifn_init_dma().
 */
static void hifn_clear_rings(struct hifn_device *dev)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i, u;

	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);

	/* Result ring. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;

		if (i != HIFN_D_RES_RSIZE)
			u--;

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	/* Source ring. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (i != HIFN_D_CMD_RSIZE)
			u--;
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	/* Destination ring. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);
}
/*
 * Periodic (1 Hz, self-rescheduling) watchdog work item.
 *
 * When the device has been idle (dev->active reached 0) it disables
 * the DMA control bits for rings that have drained. When no request
 * has completed since the last tick although work was started, it
 * treats the engine as stuck: after 5 consecutive stalls it performs
 * a hard reset, and in any case fails outstanding requests with
 * -EBUSY and clears the rings.
 */
static void hifn_work(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
	unsigned long flags;
	int reset = 0;
	u32 r = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->active == 0) {
		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

		/* Idle: turn off DMA for each ring that has fully drained. */
		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
			dev->flags &= ~HIFN_FLAG_DST_BUSY;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
			dev->flags &= ~HIFN_FLAG_RES_BUSY;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
	} else
		dev->active--;

	/* No completions since last tick while requests are in flight: stall. */
	if (dev->prev_success == dev->success && dev->started)
		reset = 1;
	dev->prev_success = dev->success;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (reset) {
		dprintk("%s: r: %08x, active: %d, started: %d, "
				"success: %lu: reset: %d.\n",
				dev->name, r, dev->active, dev->started,
				dev->success, reset);

		if (++dev->reset >= 5) {
			/* Five stalled ticks in a row: full restart. */
			dprintk("%s: really hard reset.\n", dev->name);
			hifn_reset_dma(dev, 1);
			hifn_stop_device(dev);
			hifn_start_device(dev);
			dev->reset = 0;
		}

		spin_lock_irqsave(&dev->lock, flags);
		hifn_check_for_completion(dev, -EBUSY);
		hifn_clear_rings(dev);
		dev->started = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	schedule_delayed_work(&dev->work, HZ);
}
  1507. static irqreturn_t hifn_interrupt(int irq, void *data)
  1508. {
  1509. struct hifn_device *dev = (struct hifn_device *)data;
  1510. struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
  1511. u32 dmacsr, restart;
  1512. dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
  1513. dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
  1514. "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
  1515. dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
  1516. dma->cmdu, dma->srcu, dma->dstu, dma->resu,
  1517. dma->cmdi, dma->srci, dma->dsti, dma->resi);
  1518. if ((dmacsr & dev->dmareg) == 0)
  1519. return IRQ_NONE;
  1520. hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
  1521. if (dmacsr & HIFN_DMACSR_ENGINE)
  1522. hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
  1523. if (dmacsr & HIFN_DMACSR_PUBDONE)
  1524. hifn_write_1(dev, HIFN_1_PUB_STATUS,
  1525. hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
  1526. restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
  1527. if (restart) {
  1528. u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
  1529. if (printk_ratelimit())
  1530. printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
  1531. dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
  1532. !!(dmacsr & HIFN_DMACSR_D_OVER),
  1533. puisr, !!(puisr & HIFN_PUISR_DSTOVER));
  1534. if (!!(puisr & HIFN_PUISR_DSTOVER))
  1535. hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
  1536. hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
  1537. HIFN_DMACSR_D_OVER));
  1538. }
  1539. restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
  1540. HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
  1541. if (restart) {
  1542. if (printk_ratelimit())
  1543. printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
  1544. dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
  1545. !!(dmacsr & HIFN_DMACSR_S_ABORT),
  1546. !!(dmacsr & HIFN_DMACSR_D_ABORT),
  1547. !!(dmacsr & HIFN_DMACSR_R_ABORT));
  1548. hifn_reset_dma(dev, 1);
  1549. hifn_init_dma(dev);
  1550. hifn_init_registers(dev);
  1551. }
  1552. if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
  1553. dprintk("%s: wait on command.\n", dev->name);
  1554. dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
  1555. hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
  1556. }
  1557. hifn_check_for_completion(dev, 0);
  1558. hifn_clear_rings(dev);
  1559. return IRQ_HANDLED;
  1560. }
/*
 * Fail or finish everything the driver currently holds: complete each
 * request still attached to a result descriptor (-ENODEV if the
 * descriptor is still owned by hardware, 0 otherwise), then drain the
 * software backlog queue, completing each queued request with -ENODEV.
 * Used on device removal.
 */
static void hifn_flush(struct hifn_device *dev)
{
	unsigned long flags;
	struct crypto_async_request *async_req;
	struct hifn_context *ctx;
	struct ablkcipher_request *req;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i;

	spin_lock_irqsave(&dev->lock, flags);
	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
		struct hifn_desc *d = &dma->resr[i];

		if (dev->sa[i]) {
			hifn_process_ready(dev->sa[i],
				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
		}
	}

	while ((async_req = crypto_dequeue_request(&dev->queue))) {
		ctx = crypto_tfm_ctx(async_req->tfm);
		req = container_of(async_req, struct ablkcipher_request, base);

		hifn_process_ready(req, -ENODEV);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
  1584. static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  1585. unsigned int len)
  1586. {
  1587. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  1588. struct hifn_context *ctx = crypto_tfm_ctx(tfm);
  1589. struct hifn_device *dev = ctx->dev;
  1590. if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
  1591. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1592. return -1;
  1593. }
  1594. if (len == HIFN_DES_KEY_LENGTH) {
  1595. u32 tmp[DES_EXPKEY_WORDS];
  1596. int ret = des_ekey(tmp, key);
  1597. if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
  1598. tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
  1599. return -EINVAL;
  1600. }
  1601. }
  1602. dev->flags &= ~HIFN_FLAG_OLD_KEY;
  1603. memcpy(ctx->key, key, len);
  1604. ctx->keysize = len;
  1605. return 0;
  1606. }
  1607. static int hifn_handle_req(struct ablkcipher_request *req)
  1608. {
  1609. struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
  1610. struct hifn_device *dev = ctx->dev;
  1611. int err = -EAGAIN;
  1612. if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
  1613. err = hifn_setup_session(req);
  1614. if (err == -EAGAIN) {
  1615. unsigned long flags;
  1616. spin_lock_irqsave(&dev->lock, flags);
  1617. err = ablkcipher_enqueue_request(&dev->queue, req);
  1618. spin_unlock_irqrestore(&dev->lock, flags);
  1619. }
  1620. return err;
  1621. }
  1622. static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
  1623. u8 type, u8 mode)
  1624. {
  1625. struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
  1626. unsigned ivsize;
  1627. ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
  1628. if (req->info && mode != ACRYPTO_MODE_ECB) {
  1629. if (type == ACRYPTO_TYPE_AES_128)
  1630. ivsize = HIFN_AES_IV_LENGTH;
  1631. else if (type == ACRYPTO_TYPE_DES)
  1632. ivsize = HIFN_DES_KEY_LENGTH;
  1633. else if (type == ACRYPTO_TYPE_3DES)
  1634. ivsize = HIFN_3DES_KEY_LENGTH;
  1635. }
  1636. if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
  1637. if (ctx->keysize == 24)
  1638. type = ACRYPTO_TYPE_AES_192;
  1639. else if (ctx->keysize == 32)
  1640. type = ACRYPTO_TYPE_AES_256;
  1641. }
  1642. ctx->op = op;
  1643. ctx->mode = mode;
  1644. ctx->type = type;
  1645. ctx->iv = req->info;
  1646. ctx->ivsize = ivsize;
  1647. /*
  1648. * HEAVY TODO: needs to kick Herbert XU to write documentation.
  1649. * HEAVY TODO: needs to kick Herbert XU to write documentation.
  1650. * HEAVY TODO: needs to kick Herbert XU to write documentation.
  1651. */
  1652. return hifn_handle_req(req);
  1653. }
  1654. static int hifn_process_queue(struct hifn_device *dev)
  1655. {
  1656. struct crypto_async_request *async_req;
  1657. struct hifn_context *ctx;
  1658. struct ablkcipher_request *req;
  1659. unsigned long flags;
  1660. int err = 0;
  1661. while (dev->started < HIFN_QUEUE_LENGTH) {
  1662. spin_lock_irqsave(&dev->lock, flags);
  1663. async_req = crypto_dequeue_request(&dev->queue);
  1664. spin_unlock_irqrestore(&dev->lock, flags);
  1665. if (!async_req)
  1666. break;
  1667. ctx = crypto_tfm_ctx(async_req->tfm);
  1668. req = container_of(async_req, struct ablkcipher_request, base);
  1669. err = hifn_handle_req(req);
  1670. if (err)
  1671. break;
  1672. }
  1673. return err;
  1674. }
  1675. static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
  1676. u8 type, u8 mode)
  1677. {
  1678. int err;
  1679. struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
  1680. struct hifn_device *dev = ctx->dev;
  1681. err = hifn_setup_crypto_req(req, op, type, mode);
  1682. if (err)
  1683. return err;
  1684. if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
  1685. err = hifn_process_queue(dev);
  1686. return err;
  1687. }
/*
 * AES encryption wrappers: bind (op, type, mode) and forward to
 * hifn_setup_crypto(). The 128-bit type is promoted to 192/256-bit
 * in hifn_setup_crypto_req() according to the installed key size.
 */
static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}
/*
 * AES decryption wrappers.
 */
static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}
/*
 * DES encryption wrappers.
 */
static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}
/*
 * DES decryption wrappers.
 */
static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}
/*
 * 3DES encryption wrappers.
 */
static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/*
 * 3DES decryption wrappers.
 */
static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/*
 * Template used at registration time to stamp out one crypto_alg per
 * supported cipher/mode combination (see hifn_alg_templates[] and
 * hifn_alg_alloc()).
 */
struct hifn_alg_template
{
	char name[CRYPTO_MAX_ALG_NAME];		/* crypto API name, e.g. "cbc(aes)" */
	char drv_name[CRYPTO_MAX_ALG_NAME];	/* driver name, e.g. "hifn-aes" */
	unsigned int bsize;			/* cipher block size in bytes (8 or 16) */
	struct ablkcipher_alg ablkcipher;	/* callbacks and key-size limits */
};
  1833. static struct hifn_alg_template hifn_alg_templates[] = {
  1834. /*
  1835. * 3DES ECB, CBC, CFB and OFB modes.
  1836. */
  1837. {
  1838. .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
  1839. .ablkcipher = {
  1840. .min_keysize = HIFN_3DES_KEY_LENGTH,
  1841. .max_keysize = HIFN_3DES_KEY_LENGTH,
  1842. .setkey = hifn_setkey,
  1843. .encrypt = hifn_encrypt_3des_cfb,
  1844. .decrypt = hifn_decrypt_3des_cfb,
  1845. },
  1846. },
  1847. {
  1848. .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
  1849. .ablkcipher = {
  1850. .min_keysize = HIFN_3DES_KEY_LENGTH,
  1851. .max_keysize = HIFN_3DES_KEY_LENGTH,
  1852. .setkey = hifn_setkey,
  1853. .encrypt = hifn_encrypt_3des_ofb,
  1854. .decrypt = hifn_decrypt_3des_ofb,
  1855. },
  1856. },
  1857. {
  1858. .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
  1859. .ablkcipher = {
  1860. .min_keysize = HIFN_3DES_KEY_LENGTH,
  1861. .max_keysize = HIFN_3DES_KEY_LENGTH,
  1862. .setkey = hifn_setkey,
  1863. .encrypt = hifn_encrypt_3des_cbc,
  1864. .decrypt = hifn_decrypt_3des_cbc,
  1865. },
  1866. },
  1867. {
  1868. .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
  1869. .ablkcipher = {
  1870. .min_keysize = HIFN_3DES_KEY_LENGTH,
  1871. .max_keysize = HIFN_3DES_KEY_LENGTH,
  1872. .setkey = hifn_setkey,
  1873. .encrypt = hifn_encrypt_3des_ecb,
  1874. .decrypt = hifn_decrypt_3des_ecb,
  1875. },
  1876. },
  1877. /*
  1878. * DES ECB, CBC, CFB and OFB modes.
  1879. */
  1880. {
  1881. .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
  1882. .ablkcipher = {
  1883. .min_keysize = HIFN_DES_KEY_LENGTH,
  1884. .max_keysize = HIFN_DES_KEY_LENGTH,
  1885. .setkey = hifn_setkey,
  1886. .encrypt = hifn_encrypt_des_cfb,
  1887. .decrypt = hifn_decrypt_des_cfb,
  1888. },
  1889. },
  1890. {
  1891. .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
  1892. .ablkcipher = {
  1893. .min_keysize = HIFN_DES_KEY_LENGTH,
  1894. .max_keysize = HIFN_DES_KEY_LENGTH,
  1895. .setkey = hifn_setkey,
  1896. .encrypt = hifn_encrypt_des_ofb,
  1897. .decrypt = hifn_decrypt_des_ofb,
  1898. },
  1899. },
  1900. {
  1901. .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
  1902. .ablkcipher = {
  1903. .min_keysize = HIFN_DES_KEY_LENGTH,
  1904. .max_keysize = HIFN_DES_KEY_LENGTH,
  1905. .setkey = hifn_setkey,
  1906. .encrypt = hifn_encrypt_des_cbc,
  1907. .decrypt = hifn_decrypt_des_cbc,
  1908. },
  1909. },
  1910. {
  1911. .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
  1912. .ablkcipher = {
  1913. .min_keysize = HIFN_DES_KEY_LENGTH,
  1914. .max_keysize = HIFN_DES_KEY_LENGTH,
  1915. .setkey = hifn_setkey,
  1916. .encrypt = hifn_encrypt_des_ecb,
  1917. .decrypt = hifn_decrypt_des_ecb,
  1918. },
  1919. },
  1920. /*
  1921. * AES ECB, CBC, CFB and OFB modes.
  1922. */
  1923. {
  1924. .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
  1925. .ablkcipher = {
  1926. .min_keysize = AES_MIN_KEY_SIZE,
  1927. .max_keysize = AES_MAX_KEY_SIZE,
  1928. .setkey = hifn_setkey,
  1929. .encrypt = hifn_encrypt_aes_ecb,
  1930. .decrypt = hifn_decrypt_aes_ecb,
  1931. },
  1932. },
  1933. {
  1934. .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
  1935. .ablkcipher = {
  1936. .min_keysize = AES_MIN_KEY_SIZE,
  1937. .max_keysize = AES_MAX_KEY_SIZE,
  1938. .setkey = hifn_setkey,
  1939. .encrypt = hifn_encrypt_aes_cbc,
  1940. .decrypt = hifn_decrypt_aes_cbc,
  1941. },
  1942. },
  1943. {
  1944. .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
  1945. .ablkcipher = {
  1946. .min_keysize = AES_MIN_KEY_SIZE,
  1947. .max_keysize = AES_MAX_KEY_SIZE,
  1948. .setkey = hifn_setkey,
  1949. .encrypt = hifn_encrypt_aes_cfb,
  1950. .decrypt = hifn_decrypt_aes_cfb,
  1951. },
  1952. },
  1953. {
  1954. .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
  1955. .ablkcipher = {
  1956. .min_keysize = AES_MIN_KEY_SIZE,
  1957. .max_keysize = AES_MAX_KEY_SIZE,
  1958. .setkey = hifn_setkey,
  1959. .encrypt = hifn_encrypt_aes_ofb,
  1960. .decrypt = hifn_decrypt_aes_ofb,
  1961. },
  1962. },
  1963. };
  1964. static int hifn_cra_init(struct crypto_tfm *tfm)
  1965. {
  1966. struct crypto_alg *alg = tfm->__crt_alg;
  1967. struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
  1968. struct hifn_context *ctx = crypto_tfm_ctx(tfm);
  1969. ctx->dev = ha->dev;
  1970. return 0;
  1971. }
/*
 * Allocate and register one crypto_alg from a template, linking it into
 * dev->alg_list so hifn_unregister_alg() can tear it down later. On
 * registration failure the entry is unlinked and freed again.
 */
static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
{
	struct hifn_crypto_alg *alg;
	int err;

	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
	if (!alg)
		return -ENOMEM;

	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);

	alg->alg.cra_priority = 300;
	alg->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
	alg->alg.cra_blocksize = t->bsize;
	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
	/* 16-byte alignment for AES, 4-byte for the 8-byte-block ciphers. */
	alg->alg.cra_alignmask = 15;
	if (t->bsize == 8)
		alg->alg.cra_alignmask = 3;
	alg->alg.cra_type = &crypto_ablkcipher_type;
	alg->alg.cra_module = THIS_MODULE;
	alg->alg.cra_u.ablkcipher = t->ablkcipher;
	alg->alg.cra_init = hifn_cra_init;

	alg->dev = dev;

	/*
	 * NOTE(review): the entry is linked before crypto_register_alg()
	 * and unlinked on failure below — confirm no concurrent reader of
	 * alg_list can observe the unregistered entry.
	 */
	list_add_tail(&alg->entry, &dev->alg_list);

	err = crypto_register_alg(&alg->alg);
	if (err) {
		list_del(&alg->entry);
		kfree(alg);
	}

	return err;
}
  2001. static void hifn_unregister_alg(struct hifn_device *dev)
  2002. {
  2003. struct hifn_crypto_alg *a, *n;
  2004. list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
  2005. list_del(&a->entry);
  2006. crypto_unregister_alg(&a->alg);
  2007. kfree(a);
  2008. }
  2009. }
  2010. static int hifn_register_alg(struct hifn_device *dev)
  2011. {
  2012. int i, err;
  2013. for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
  2014. err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
  2015. if (err)
  2016. goto err_out_exit;
  2017. }
  2018. return 0;
  2019. err_out_exit:
  2020. hifn_unregister_alg(dev);
  2021. return err;
  2022. }
  2023. static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  2024. {
  2025. int err, i;
  2026. struct hifn_device *dev;
  2027. char name[8];
  2028. err = pci_enable_device(pdev);
  2029. if (err)
  2030. return err;
  2031. pci_set_master(pdev);
  2032. err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  2033. if (err)
  2034. goto err_out_disable_pci_device;
  2035. snprintf(name, sizeof(name), "hifn%d",
  2036. atomic_inc_return(&hifn_dev_number)-1);
  2037. err = pci_request_regions(pdev, name);
  2038. if (err)
  2039. goto err_out_disable_pci_device;
  2040. if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
  2041. pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
  2042. pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
  2043. dprintk("%s: Broken hardware - I/O regions are too small.\n",
  2044. pci_name(pdev));
  2045. err = -ENODEV;
  2046. goto err_out_free_regions;
  2047. }
  2048. dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
  2049. GFP_KERNEL);
  2050. if (!dev) {
  2051. err = -ENOMEM;
  2052. goto err_out_free_regions;
  2053. }
  2054. INIT_LIST_HEAD(&dev->alg_list);
  2055. snprintf(dev->name, sizeof(dev->name), "%s", name);
  2056. spin_lock_init(&dev->lock);
  2057. for (i=0; i<3; ++i) {
  2058. unsigned long addr, size;
  2059. addr = pci_resource_start(pdev, i);
  2060. size = pci_resource_len(pdev, i);
  2061. dev->bar[i] = ioremap_nocache(addr, size);
  2062. if (!dev->bar[i])
  2063. goto err_out_unmap_bars;
  2064. }
  2065. dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
  2066. if (!dev->result_mem) {
  2067. dprintk("Failed to allocate %d pages for result_mem.\n",
  2068. HIFN_MAX_RESULT_ORDER);
  2069. goto err_out_unmap_bars;
  2070. }
  2071. memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
  2072. dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
  2073. PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
  2074. dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
  2075. &dev->desc_dma);
  2076. if (!dev->desc_virt) {
  2077. dprintk("Failed to allocate descriptor rings.\n");
  2078. goto err_out_free_result_pages;
  2079. }
  2080. memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
  2081. dev->pdev = pdev;
  2082. dev->irq = pdev->irq;
  2083. for (i=0; i<HIFN_D_RES_RSIZE; ++i)
  2084. dev->sa[i] = NULL;
  2085. pci_set_drvdata(pdev, dev);
  2086. crypto_init_queue(&dev->queue, 1);
  2087. err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
  2088. if (err) {
  2089. dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
  2090. dev->irq = 0;
  2091. goto err_out_free_desc;
  2092. }
  2093. err = hifn_start_device(dev);
  2094. if (err)
  2095. goto err_out_free_irq;
  2096. err = hifn_test(dev, 1, 0);
  2097. if (err)
  2098. goto err_out_stop_device;
  2099. err = hifn_register_alg(dev);
  2100. if (err)
  2101. goto err_out_stop_device;
  2102. INIT_DELAYED_WORK(&dev->work, hifn_work);
  2103. schedule_delayed_work(&dev->work, HZ);
  2104. dprintk("HIFN crypto accelerator card at %s has been "
  2105. "successfully registered as %s.\n",
  2106. pci_name(pdev), dev->name);
  2107. return 0;
  2108. err_out_stop_device:
  2109. hifn_reset_dma(dev, 1);
  2110. hifn_stop_device(dev);
  2111. err_out_free_irq:
  2112. free_irq(dev->irq, dev->name);
  2113. err_out_free_desc:
  2114. pci_free_consistent(pdev, sizeof(struct hifn_dma),
  2115. dev->desc_virt, dev->desc_dma);
  2116. err_out_free_result_pages:
  2117. pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
  2118. PCI_DMA_FROMDEVICE);
  2119. free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
  2120. err_out_unmap_bars:
  2121. for (i=0; i<3; ++i)
  2122. if (dev->bar[i])
  2123. iounmap(dev->bar[i]);
  2124. err_out_free_regions:
  2125. pci_release_regions(pdev);
  2126. err_out_disable_pci_device:
  2127. pci_disable_device(pdev);
  2128. return err;
  2129. }
  2130. static void hifn_remove(struct pci_dev *pdev)
  2131. {
  2132. int i;
  2133. struct hifn_device *dev;
  2134. dev = pci_get_drvdata(pdev);
  2135. if (dev) {
  2136. cancel_delayed_work(&dev->work);
  2137. flush_scheduled_work();
  2138. hifn_unregister_alg(dev);
  2139. hifn_reset_dma(dev, 1);
  2140. hifn_stop_device(dev);
  2141. free_irq(dev->irq, dev->name);
  2142. hifn_flush(dev);
  2143. pci_free_consistent(pdev, sizeof(struct hifn_dma),
  2144. dev->desc_virt, dev->desc_dma);
  2145. pci_unmap_single(pdev, dev->dst,
  2146. PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
  2147. PCI_DMA_FROMDEVICE);
  2148. free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
  2149. for (i=0; i<3; ++i)
  2150. if (dev->bar[i])
  2151. iounmap(dev->bar[i]);
  2152. kfree(dev);
  2153. }
  2154. pci_release_regions(pdev);
  2155. pci_disable_device(pdev);
  2156. }
/* PCI IDs of supported chips: HIFN 7955 and 7956. */
static struct pci_device_id hifn_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
/* PCI driver glue. */
static struct pci_driver hifn_pci_driver = {
	.name     = "hifn795x",
	.id_table = hifn_pci_tbl,
	.probe    = hifn_probe,
	.remove   = __devexit_p(hifn_remove),
};
  2169. static int __devinit hifn_init(void)
  2170. {
  2171. int err;
  2172. err = pci_register_driver(&hifn_pci_driver);
  2173. if (err < 0) {
  2174. dprintk("Failed to register PCI driver for %s device.\n",
  2175. hifn_pci_driver.name);
  2176. return -ENODEV;
  2177. }
  2178. printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
  2179. "has been successfully registered.\n");
  2180. return 0;
  2181. }
  2182. static void __devexit hifn_fini(void)
  2183. {
  2184. pci_unregister_driver(&hifn_pci_driver);
  2185. printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
  2186. "has been successfully unregistered.\n");
  2187. }
/* Module entry points and metadata. */
module_init(hifn_init);
module_exit(hifn_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");