  1. /*
  2. * linux/drivers/s390/crypto/z90main.c
  3. *
  4. * z90crypt 1.3.2
  5. *
  6. * Copyright (C) 2001, 2004 IBM Corporation
  7. * Author(s): Robert Burroughs (burrough@us.ibm.com)
  8. * Eric Rossman (edrossma@us.ibm.com)
  9. *
  10. * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2, or (at your option)
  15. * any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  25. */
  26. #include <asm/uaccess.h> // copy_(from|to)_user
  27. #include <linux/compat.h>
  28. #include <linux/compiler.h>
  29. #include <linux/delay.h> // mdelay
  30. #include <linux/init.h>
  31. #include <linux/interrupt.h> // for tasklets
  32. #include <linux/ioctl32.h>
  33. #include <linux/miscdevice.h>
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/kobject_uevent.h>
  37. #include <linux/proc_fs.h>
  38. #include <linux/syscalls.h>
  39. #include <linux/version.h>
  40. #include "z90crypt.h"
  41. #include "z90common.h"
  42. #define VERSION_Z90MAIN_C "$Revision: 1.62 $"
  43. static char z90main_version[] __initdata =
  44. "z90main.o (" VERSION_Z90MAIN_C "/"
  45. VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
  46. extern char z90hardware_version[];
  47. /**
  48. * Defaults that may be modified.
  49. */
  50. /**
  51. * You can specify a different minor at compile time.
  52. */
  53. #ifndef Z90CRYPT_MINOR
  54. #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
  55. #endif
  56. /**
  57. * You can specify a different domain at compile time or on the insmod
  58. * command line.
  59. */
  60. #ifndef DOMAIN_INDEX
  61. #define DOMAIN_INDEX -1
  62. #endif
  63. /**
  64. * This is the name under which the device is registered in /proc/modules.
  65. */
  66. #define REG_NAME "z90crypt"
  67. /**
  68. * Cleanup should run every CLEANUPTIME seconds and should clean up requests
  69. * older than CLEANUPTIME seconds in the past.
  70. */
  71. #ifndef CLEANUPTIME
  72. #define CLEANUPTIME 15
  73. #endif
  74. /**
  75. * Config should run every CONFIGTIME seconds
  76. */
  77. #ifndef CONFIGTIME
  78. #define CONFIGTIME 30
  79. #endif
  80. /**
  81. * The first execution of the config task should take place
  82. * immediately after initialization
  83. */
  84. #ifndef INITIAL_CONFIGTIME
  85. #define INITIAL_CONFIGTIME 1
  86. #endif
  87. /**
  88. * Reader should run every READERTIME milliseconds
  89. * With the 100Hz patch for s390, z90crypt can lock the system solid while
  90. * under heavy load. We'll try to avoid that.
  91. */
  92. #ifndef READERTIME
  93. #if HZ > 1000
  94. #define READERTIME 2
  95. #else
  96. #define READERTIME 10
  97. #endif
  98. #endif
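/*
 * Illustrative sketch (not part of the original source): how READERTIME
 * becomes a jiffies interval when reader_timer is armed further below.
 * The HZ values are examples; the actual HZ depends on the kernel build.
 *
 *   HZ == 100  -> READERTIME 10 -> 10 * 100  / 1000 =  1 jiffy   (10 ms)
 *   HZ == 1000 -> READERTIME 10 -> 10 * 1000 / 1000 = 10 jiffies (10 ms)
 *   HZ == 1200 -> READERTIME  2 ->  2 * 1200 / 1000 =  2 jiffies (~1.7 ms)
 *
 *   reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
 */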
  99. /**
  100. * turn long device array index into device pointer
  101. */
  102. #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
  103. /**
  104. * turn short device array index into long device array index
  105. */
  106. #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
  107. /**
  108. * turn short device array index into device pointer
  109. */
  110. #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
  111. /**
  112. * Status for a work-element
  113. */
  114. #define STAT_DEFAULT 0x00 // request has not been processed
  115. #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
  116. // else, device is determined each write
  117. #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
  118. // before being sent to the hardware.
  119. #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
  120. // 0x20 // UNUSED state
  121. #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
  122. #define STAT_NOWORK 0x00 // bits off: no work on any queue
  123. #define STAT_RDWRMASK 0x30 // mask for bits 5-4
  124. /**
  125. * Macros to check the status RDWRMASK
  126. */
  127. #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
  128. #define SET_RDWRMASK(statbyte, newval) \
  129. {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
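/*
 * Illustrative sketch (not part of the original source, compiled out): how
 * the RDWRMASK helpers above are meant to be used on a work element's
 * status byte; the function name is hypothetical.
 */
#if 0
static void rdwrmask_example(unsigned char *statbyte)
{
	SET_RDWRMASK(*statbyte, STAT_WRITTEN);	/* caller's work queued, not complete */
	if (CHK_RDWRMASK(*statbyte) == STAT_READPEND)
		PDEBUG("work done, response being returned\n");
	SET_RDWRMASK(*statbyte, STAT_NOWORK);	/* nothing left on any queue */
}
#endif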
  130. /**
  131. * Audit Trail. Progress of a Work element
  132. * audit[0]: Unless noted otherwise, these bits are all set by the process
  133. */
  134. #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
  135. #define FP_BUFFREQ 0x40 // Low Level buffer requested
  136. #define FP_BUFFGOT 0x20 // Low Level buffer obtained
  137. #define FP_SENT 0x10 // Work element sent to a crypto device
  138. // (may be set by process or by reader task)
  139. #define FP_PENDING 0x08 // Work element placed on pending queue
  140. // (may be set by process or by reader task)
  141. #define FP_REQUEST 0x04 // Work element placed on request queue
  142. #define FP_ASLEEP 0x02 // Work element about to sleep
  143. #define FP_AWAKE 0x01 // Work element has been awakened
  144. /**
  145. * audit[1]: These bits are set by the reader task and/or the cleanup task
  146. */
  147. #define FP_NOTPENDING 0x80 // Work element removed from pending queue
  148. #define FP_AWAKENING 0x40 // Caller about to be awakened
  149. #define FP_TIMEDOUT 0x20 // Caller timed out
  150. #define FP_RESPSIZESET 0x10 // Response size copied to work element
  151. #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
  152. #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
  153. #define FP_REMREQUEST 0x02 // Work element removed from request queue
  154. #define FP_SIGNALED 0x01 // Work element was awakened by a signal
  155. /**
  156. * audit[2]: unused
  157. */
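/*
 * Illustrative sketch (not part of the original source): decoding an audit
 * byte. A work element whose audit[0] reads 0xFA, for example, accumulated
 *
 *   0xFA == FP_COPYFROM | FP_BUFFREQ | FP_BUFFGOT |
 *           FP_SENT | FP_PENDING | FP_ASLEEP
 *
 * i.e. the request was copied in, buffered, sent to a device, placed on the
 * pending queue, and the caller went to sleep waiting for the response.
 */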
  158. /**
  159. * state of the file handle in private_data.status
  160. */
  161. #define STAT_OPEN 0
  162. #define STAT_CLOSED 1
  163. /**
  164. * PID() expands to the process ID of the current process
  165. */
  166. #define PID() (current->pid)
  167. /**
  168. * Selected Constants. The number of APs and the number of devices
  169. */
  170. #ifndef Z90CRYPT_NUM_APS
  171. #define Z90CRYPT_NUM_APS 64
  172. #endif
  173. #ifndef Z90CRYPT_NUM_DEVS
  174. #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
  175. #endif
  176. /**
  177. * Buffer size for receiving responses. The maximum Response Size
  178. * is actually the maximum request size, since in an error condition
  179. * the request itself may be returned unchanged.
  180. */
  181. #define MAX_RESPONSE_SIZE 0x0000077C
  182. /**
  183. * A count and status-byte mask
  184. */
  185. struct status {
  186. int st_count; // # of enabled devices
  187. int disabled_count; // # of disabled devices
  188. int user_disabled_count; // # of devices disabled via proc fs
  189. unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
  190. };
  191. /**
  192. * The array of device indexes is a mechanism for fast indexing into
  193. * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
  194. * installed, device_index[0] is 3, device_index[1] is 9, and
  195. * device_index[2] is 47.
  196. */
  197. struct device_x {
  198. int device_index[Z90CRYPT_NUM_DEVS];
  199. };
  200. /**
  201. * All devices are arranged in a single array: 64 APs
  202. */
  203. struct device {
  204. int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
  205. // PCIXCC_MCL3, CEX2C
  206. enum devstat dev_stat; // current device status
  207. int dev_self_x; // Index in array
  208. int disabled; // Set when device is in error
  209. int user_disabled; // Set when device is disabled by user
  210. int dev_q_depth; // q depth
  211. unsigned char * dev_resp_p; // Response buffer address
  212. int dev_resp_l; // Response Buffer length
  213. int dev_caller_count; // Number of callers
  214. int dev_total_req_cnt; // # requests for device since load
  215. struct list_head dev_caller_list; // List of callers
  216. };
  217. /**
  218. * There's a struct status and a struct device_x for each device type.
  219. */
  220. struct hdware_block {
  221. struct status hdware_mask;
  222. struct status type_mask[Z90CRYPT_NUM_TYPES];
  223. struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
  224. unsigned char device_type_array[Z90CRYPT_NUM_APS];
  225. };
  226. /**
  227. * z90crypt is the topmost data structure in the hierarchy.
  228. */
  229. struct z90crypt {
  230. int max_count; // Nr of possible crypto devices
  231. struct status mask;
  232. int q_depth_array[Z90CRYPT_NUM_DEVS];
  233. int dev_type_array[Z90CRYPT_NUM_DEVS];
  234. struct device_x overall_device_x; // array device indexes
  235. struct device * device_p[Z90CRYPT_NUM_DEVS];
  236. int terminating;
  237. int domain_established;// TRUE: domain has been found
  238. int cdx; // Crypto Domain Index
  239. int len; // Length of this data structure
  240. struct hdware_block *hdware_info;
  241. };
  242. /**
  243. * An array of these structures is pointed to from dev_caller
  244. * The length of the array depends on the device type. For APs,
  245. * there are 8.
  246. *
  247. * The caller buffer is allocated to the user at OPEN. At WRITE,
  248. * it contains the request; at READ, the response. The function
  249. * send_to_crypto_device converts the request to device-dependent
  250. * form and uses the caller's OPEN-allocated buffer for the response.
  251. *
  252. * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
  253. * which points to it), see the discussion in z90hardware.c.
  254. * Search for "extended request message block".
  255. */
  256. struct caller {
  257. int caller_buf_l; // length of original request
  258. unsigned char * caller_buf_p; // Original request on WRITE
  259. int caller_dev_dep_req_l; // len device dependent request
  260. unsigned char * caller_dev_dep_req_p; // Device dependent form
  261. unsigned char caller_id[8]; // caller-supplied message id
  262. struct list_head caller_liste;
  263. unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
  264. };
  265. /**
  266. * Function prototypes from z90hardware.c
  267. */
  268. enum hdstat query_online(int, int, int, int *, int *);
  269. enum devstat reset_device(int, int, int);
  270. enum devstat send_to_AP(int, int, int, unsigned char *);
  271. enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
  272. int convert_request(unsigned char *, int, short, int, int, int *,
  273. unsigned char *);
  274. int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
  275. /**
  276. * Low level function prototypes
  277. */
  278. static int create_z90crypt(int *);
  279. static int refresh_z90crypt(int *);
  280. static int find_crypto_devices(struct status *);
  281. static int create_crypto_device(int);
  282. static int destroy_crypto_device(int);
  283. static void destroy_z90crypt(void);
  284. static int refresh_index_array(struct status *, struct device_x *);
  285. static int probe_device_type(struct device *);
  286. static int probe_PCIXCC_type(struct device *);
  287. /**
  288. * proc fs definitions
  289. */
  290. static struct proc_dir_entry *z90crypt_entry;
  291. /**
  292. * data structures
  293. */
  294. /**
  295. * work_element.opener points back to this structure
  296. */
  297. struct priv_data {
  298. pid_t opener_pid;
  299. unsigned char status; // 0: open 1: closed
  300. };
  301. /**
  302. * A work element is allocated for each request
  303. */
  304. struct work_element {
  305. struct priv_data *priv_data;
  306. pid_t pid;
  307. int devindex; // index of device processing this w_e
  308. // (If request did not specify device,
  309. // -1 until placed onto a queue)
  310. int devtype;
  311. struct list_head liste; // used for requestq and pendingq
  312. char buffer[128]; // local copy of user request
  313. int buff_size; // size of the buffer for the request
  314. char resp_buff[RESPBUFFSIZE];
  315. int resp_buff_size;
  316. char __user * resp_addr; // address of response in user space
  317. unsigned int funccode; // function code of request
  318. wait_queue_head_t waitq;
  319. unsigned long requestsent; // time at which the request was sent
  320. atomic_t alarmrung; // wake-up signal
  321. unsigned char caller_id[8]; // pid + counter, for this w_e
  322. unsigned char status[1]; // bits to mark status of the request
  323. unsigned char audit[3]; // record of work element's progress
  324. unsigned char * requestptr; // address of request buffer
  325. int retcode; // return code of request
  326. };
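/*
 * Illustrative sketch (not part of the original source): a work_element and
 * its struct caller share a single zeroed page. allocate_work_element()
 * below gets the page; init_work_element() points requestptr just past the
 * work_element itself.
 *
 *   page: | struct work_element | struct caller (requestptr) | unused |
 */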
  327. /**
  328. * High level function prototypes
  329. */
  330. static int z90crypt_open(struct inode *, struct file *);
  331. static int z90crypt_release(struct inode *, struct file *);
  332. static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
  333. static ssize_t z90crypt_write(struct file *, const char __user *,
  334. size_t, loff_t *);
  335. static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
  336. static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
  337. static void z90crypt_reader_task(unsigned long);
  338. static void z90crypt_schedule_reader_task(unsigned long);
  339. static void z90crypt_config_task(unsigned long);
  340. static void z90crypt_cleanup_task(unsigned long);
  341. static int z90crypt_status(char *, char **, off_t, int, int *, void *);
  342. static int z90crypt_status_write(struct file *, const char __user *,
  343. unsigned long, void *);
  344. /**
  345. * Storage allocated at initialization and used throughout the life of
  346. * this insmod
  347. */
  348. static int domain = DOMAIN_INDEX;
  349. static struct z90crypt z90crypt;
  350. static int quiesce_z90crypt;
  351. static spinlock_t queuespinlock;
  352. static struct list_head request_list;
  353. static int requestq_count;
  354. static struct list_head pending_list;
  355. static int pendingq_count;
  356. static struct tasklet_struct reader_tasklet;
  357. static struct timer_list reader_timer;
  358. static struct timer_list config_timer;
  359. static struct timer_list cleanup_timer;
  360. static atomic_t total_open;
  361. static atomic_t z90crypt_step;
  362. static struct file_operations z90crypt_fops = {
  363. .owner = THIS_MODULE,
  364. .read = z90crypt_read,
  365. .write = z90crypt_write,
  366. .unlocked_ioctl = z90crypt_unlocked_ioctl,
  367. #ifdef CONFIG_COMPAT
  368. .compat_ioctl = z90crypt_compat_ioctl,
  369. #endif
  370. .open = z90crypt_open,
  371. .release = z90crypt_release
  372. };
  373. static struct miscdevice z90crypt_misc_device = {
  374. .minor = Z90CRYPT_MINOR,
  375. .name = DEV_NAME,
  376. .fops = &z90crypt_fops,
  377. .devfs_name = DEV_NAME
  378. };
  379. /**
  380. * Documentation values.
  381. */
  382. MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
  383. "and Jochen Roehrig");
  384. MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
  385. "Copyright 2001, 2004 IBM Corporation");
  386. MODULE_LICENSE("GPL");
  387. module_param(domain, int, 0);
  388. MODULE_PARM_DESC(domain, "domain index for device");
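/*
 * Illustrative userspace sketch (not part of this module, compiled out):
 * driving the misc device registered above with the ICARSAMODEXPO ioctl.
 * The device node name and the <asm/z90crypt.h> header path are
 * assumptions; struct ica_rsa_modexpo itself comes from z90crypt.h.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/z90crypt.h>

static int modexpo_example(char *msg, char *out, int len,
			   char *exponent, char *modulus)
{
	struct ica_rsa_modexpo mex;
	int fd, rc;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	mex.inputdata = msg;
	mex.inputdatalength = len;
	mex.outputdata = out;
	mex.outputdatalength = len;	/* driver sets this equal to inputdatalength */
	mex.b_key = exponent;		/* public or private exponent */
	mex.n_modulus = modulus;	/* RSA modulus */
	rc = ioctl(fd, ICARSAMODEXPO, &mex);
	close(fd);			/* a real caller would retry transient errors */
	return rc;
}
#endif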
  389. #ifdef CONFIG_COMPAT
  390. /**
  391. * ioctl32 conversion routines
  392. */
  393. struct ica_rsa_modexpo_32 { // For 32-bit callers
  394. compat_uptr_t inputdata;
  395. unsigned int inputdatalength;
  396. compat_uptr_t outputdata;
  397. unsigned int outputdatalength;
  398. compat_uptr_t b_key;
  399. compat_uptr_t n_modulus;
  400. };
  401. static long
  402. trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
  403. {
  404. struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
  405. struct ica_rsa_modexpo_32 mex32k;
  406. struct ica_rsa_modexpo __user *mex64;
  407. long ret = 0;
  408. unsigned int i;
  409. if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  410. return -EFAULT;
  411. mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
  412. if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
  413. return -EFAULT;
  414. if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  415. return -EFAULT;
  416. if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
  417. __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
  418. __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
  419. __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
  420. __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
  421. __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
  422. return -EFAULT;
  423. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
  424. if (!ret)
  425. if (__get_user(i, &mex64->outputdatalength) ||
  426. __put_user(i, &mex32u->outputdatalength))
  427. ret = -EFAULT;
  428. return ret;
  429. }
  430. struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
  431. compat_uptr_t inputdata;
  432. unsigned int inputdatalength;
  433. compat_uptr_t outputdata;
  434. unsigned int outputdatalength;
  435. compat_uptr_t bp_key;
  436. compat_uptr_t bq_key;
  437. compat_uptr_t np_prime;
  438. compat_uptr_t nq_prime;
  439. compat_uptr_t u_mult_inv;
  440. };
  441. static long
  442. trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
  443. {
  444. struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
  445. struct ica_rsa_modexpo_crt_32 crt32k;
  446. struct ica_rsa_modexpo_crt __user *crt64;
  447. long ret = 0;
  448. unsigned int i;
  449. if (!access_ok(VERIFY_WRITE, crt32u,
  450. sizeof(struct ica_rsa_modexpo_crt_32)))
  451. return -EFAULT;
  452. crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
  453. if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
  454. return -EFAULT;
  455. if (copy_from_user(&crt32k, crt32u,
  456. sizeof(struct ica_rsa_modexpo_crt_32)))
  457. return -EFAULT;
  458. if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
  459. __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
  460. __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
  461. __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
  462. __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
  463. __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
  464. __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
  465. __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
  466. __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
  467. return -EFAULT;
  468. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
  469. if (!ret)
  470. if (__get_user(i, &crt64->outputdatalength) ||
  471. __put_user(i, &crt32u->outputdatalength))
  472. ret = -EFAULT;
  473. return ret;
  474. }
  475. static long
  476. z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  477. {
  478. switch (cmd) {
  479. case ICAZ90STATUS:
  480. case Z90QUIESCE:
  481. case Z90STAT_TOTALCOUNT:
  482. case Z90STAT_PCICACOUNT:
  483. case Z90STAT_PCICCCOUNT:
  484. case Z90STAT_PCIXCCCOUNT:
  485. case Z90STAT_PCIXCCMCL2COUNT:
  486. case Z90STAT_PCIXCCMCL3COUNT:
  487. case Z90STAT_CEX2CCOUNT:
  488. case Z90STAT_REQUESTQ_COUNT:
  489. case Z90STAT_PENDINGQ_COUNT:
  490. case Z90STAT_TOTALOPEN_COUNT:
  491. case Z90STAT_DOMAIN_INDEX:
  492. case Z90STAT_STATUS_MASK:
  493. case Z90STAT_QDEPTH_MASK:
  494. case Z90STAT_PERDEV_REQCNT:
  495. return z90crypt_unlocked_ioctl(filp, cmd, arg);
  496. case ICARSAMODEXPO:
  497. return trans_modexpo32(filp, cmd, arg);
  498. case ICARSACRT:
  499. return trans_modexpo_crt32(filp, cmd, arg);
  500. default:
  501. return -ENOIOCTLCMD;
  502. }
  503. }
  504. #endif
  505. /**
  506. * The module initialization code.
  507. */
  508. static int __init
  509. z90crypt_init_module(void)
  510. {
  511. int result, nresult;
  512. struct proc_dir_entry *entry;
  513. PDEBUG("PID %d\n", PID());
  514. if ((domain < -1) || (domain > 15)) {
  515. PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
  516. return -EINVAL;
  517. }
  518. /* Register as misc device with given minor (or get a dynamic one). */
  519. result = misc_register(&z90crypt_misc_device);
  520. if (result < 0) {
  521. PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
  522. z90crypt_misc_device.minor, result);
  523. return result;
  524. }
  525. PDEBUG("Registered " DEV_NAME " with result %d\n", result);
  526. result = create_z90crypt(&domain);
  527. if (result != 0) {
  528. PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
  529. domain, result);
  530. result = -ENOMEM;
  531. goto init_module_cleanup;
  532. }
  533. if (result == 0) {
  534. PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
  535. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
  536. __DATE__, __TIME__);
  537. PRINTKN("%s\n", z90main_version);
  538. PRINTKN("%s\n", z90hardware_version);
  539. PDEBUG("create_z90crypt (domain index %d) successful.\n",
  540. domain);
  541. } else
  542. PRINTK("No devices at startup\n");
  543. /* Initialize globals. */
  544. spin_lock_init(&queuespinlock);
  545. INIT_LIST_HEAD(&pending_list);
  546. pendingq_count = 0;
  547. INIT_LIST_HEAD(&request_list);
  548. requestq_count = 0;
  549. quiesce_z90crypt = 0;
  550. atomic_set(&total_open, 0);
  551. atomic_set(&z90crypt_step, 0);
  552. /* Set up the cleanup task. */
  553. init_timer(&cleanup_timer);
  554. cleanup_timer.function = z90crypt_cleanup_task;
  555. cleanup_timer.data = 0;
  556. cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
  557. add_timer(&cleanup_timer);
  558. /* Set up the proc file system */
  559. entry = create_proc_entry("driver/z90crypt", 0644, 0);
  560. if (entry) {
  561. entry->nlink = 1;
  562. entry->data = 0;
  563. entry->read_proc = z90crypt_status;
  564. entry->write_proc = z90crypt_status_write;
  565. }
  566. else
  567. PRINTK("Couldn't create z90crypt proc entry\n");
  568. z90crypt_entry = entry;
  569. /* Set up the configuration task. */
  570. init_timer(&config_timer);
  571. config_timer.function = z90crypt_config_task;
  572. config_timer.data = 0;
  573. config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
  574. add_timer(&config_timer);
  575. /* Set up the reader task */
  576. tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
  577. init_timer(&reader_timer);
  578. reader_timer.function = z90crypt_schedule_reader_task;
  579. reader_timer.data = 0;
  580. reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
  581. add_timer(&reader_timer);
  582. return 0; // success
  583. init_module_cleanup:
  584. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  585. PRINTK("misc_deregister failed with %d.\n", nresult);
  586. else
  587. PDEBUG("misc_deregister successful.\n");
  588. return result; // failure
  589. }
  590. /**
  591. * The module termination code
  592. */
  593. static void __exit
  594. z90crypt_cleanup_module(void)
  595. {
  596. int nresult;
  597. PDEBUG("PID %d\n", PID());
  598. remove_proc_entry("driver/z90crypt", 0);
  599. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  600. PRINTK("misc_deregister failed with %d.\n", nresult);
  601. else
  602. PDEBUG("misc_deregister successful.\n");
  603. /* Remove the tasks */
  604. tasklet_kill(&reader_tasklet);
  605. del_timer(&reader_timer);
  606. del_timer(&config_timer);
  607. del_timer(&cleanup_timer);
  608. if (z90_device_work)
  609. destroy_workqueue(z90_device_work);
  610. destroy_z90crypt();
  611. PRINTKN("Unloaded.\n");
  612. }
  613. /**
  614. * Functions running under a process id
  615. *
  616. * The I/O functions:
  617. * z90crypt_open
  618. * z90crypt_release
  619. * z90crypt_read
  620. * z90crypt_write
  621. * z90crypt_unlocked_ioctl
  622. * z90crypt_status
  623. * z90crypt_status_write
  624. * disable_card
  625. * enable_card
  626. *
  627. * Helper functions:
  628. * z90crypt_rsa
  629. * z90crypt_prepare
  630. * z90crypt_send
  631. * z90crypt_process_results
  632. *
  633. */
  634. static int
  635. z90crypt_open(struct inode *inode, struct file *filp)
  636. {
  637. struct priv_data *private_data_p;
  638. if (quiesce_z90crypt)
  639. return -EQUIESCE;
  640. private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
  641. if (!private_data_p) {
  642. PRINTK("Memory allocate failed\n");
  643. return -ENOMEM;
  644. }
  645. memset((void *)private_data_p, 0, sizeof(struct priv_data));
  646. private_data_p->status = STAT_OPEN;
  647. private_data_p->opener_pid = PID();
  648. filp->private_data = private_data_p;
  649. atomic_inc(&total_open);
  650. return 0;
  651. }
  652. static int
  653. z90crypt_release(struct inode *inode, struct file *filp)
  654. {
  655. struct priv_data *private_data_p = filp->private_data;
  656. PDEBUG("PID %d (filp %p)\n", PID(), filp);
  657. private_data_p->status = STAT_CLOSED;
  658. memset(private_data_p, 0, sizeof(struct priv_data));
  659. kfree(private_data_p);
  660. atomic_dec(&total_open);
  661. return 0;
  662. }
  663. /*
  664. * There are two read functions; a compile-time option chooses which one is built:
  665. * without USE_GET_RANDOM_BYTES
  666. * => read() always returns -EPERM;
  667. * otherwise
  668. * => read() uses get_random_bytes() kernel function
  669. */
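/*
 * Note (not part of the original source): the get_random_bytes() variant
 * below is selected by defining USE_GET_RANDOM_BYTES when this file is
 * compiled, e.g. via -DUSE_GET_RANDOM_BYTES in the module's CFLAGS;
 * otherwise read() is the -EPERM stub.
 */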
  670. #ifndef USE_GET_RANDOM_BYTES
  671. /**
  672. * z90crypt_read will not be supported beyond z90crypt 1.3.1
  673. */
  674. static ssize_t
  675. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  676. {
  677. PDEBUG("filp %p (PID %d)\n", filp, PID());
  678. return -EPERM;
  679. }
  680. #else // we want to use get_random_bytes
  681. /**
  682. * read() just returns a string of random bytes. Since we have no way
  683. * to generate these cryptographically, we just execute get_random_bytes
  684. * for the length specified.
  685. */
  686. #include <linux/random.h>
  687. static ssize_t
  688. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  689. {
  690. unsigned char *temp_buff;
  691. PDEBUG("filp %p (PID %d)\n", filp, PID());
  692. if (quiesce_z90crypt)
  693. return -EQUIESCE;
  694. if (count < 0) {
  695. PRINTK("Requested random byte count negative: %ld\n", count);
  696. return -EINVAL;
  697. }
  698. if (count > RESPBUFFSIZE) {
  699. PDEBUG("count[%d] > RESPBUFFSIZE", count);
  700. return -EINVAL;
  701. }
  702. if (count == 0)
  703. return 0;
  704. temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
  705. if (!temp_buff) {
  706. PRINTK("Memory allocate failed\n");
  707. return -ENOMEM;
  708. }
  709. get_random_bytes(temp_buff, count);
  710. if (copy_to_user(buf, temp_buff, count) != 0) {
  711. kfree(temp_buff);
  712. return -EFAULT;
  713. }
  714. kfree(temp_buff);
  715. return count;
  716. }
  717. #endif
  718. /**
  719. * Write is not allowed
  720. */
  721. static ssize_t
  722. z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
  723. {
  724. PDEBUG("filp %p (PID %d)\n", filp, PID());
  725. return -EPERM;
  726. }
  727. /**
  728. * New status functions
  729. */
  730. static inline int
  731. get_status_totalcount(void)
  732. {
  733. return z90crypt.hdware_info->hdware_mask.st_count;
  734. }
  735. static inline int
  736. get_status_PCICAcount(void)
  737. {
  738. return z90crypt.hdware_info->type_mask[PCICA].st_count;
  739. }
  740. static inline int
  741. get_status_PCICCcount(void)
  742. {
  743. return z90crypt.hdware_info->type_mask[PCICC].st_count;
  744. }
  745. static inline int
  746. get_status_PCIXCCcount(void)
  747. {
  748. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
  749. z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  750. }
  751. static inline int
  752. get_status_PCIXCCMCL2count(void)
  753. {
  754. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
  755. }
  756. static inline int
  757. get_status_PCIXCCMCL3count(void)
  758. {
  759. return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  760. }
  761. static inline int
  762. get_status_CEX2Ccount(void)
  763. {
  764. return z90crypt.hdware_info->type_mask[CEX2C].st_count;
  765. }
  766. static inline int
  767. get_status_requestq_count(void)
  768. {
  769. return requestq_count;
  770. }
  771. static inline int
  772. get_status_pendingq_count(void)
  773. {
  774. return pendingq_count;
  775. }
  776. static inline int
  777. get_status_totalopen_count(void)
  778. {
  779. return atomic_read(&total_open);
  780. }
  781. static inline int
  782. get_status_domain_index(void)
  783. {
  784. return z90crypt.cdx;
  785. }
  786. static inline unsigned char *
  787. get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
  788. {
  789. int i, ix;
  790. memcpy(status, z90crypt.hdware_info->device_type_array,
  791. Z90CRYPT_NUM_APS);
  792. for (i = 0; i < get_status_totalcount(); i++) {
  793. ix = SHRT2LONG(i);
  794. if (LONG2DEVPTR(ix)->user_disabled)
  795. status[ix] = 0x0d;
  796. }
  797. return status;
  798. }
  799. static inline unsigned char *
  800. get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
  801. {
  802. int i, ix;
  803. memset(qdepth, 0, Z90CRYPT_NUM_APS);
  804. for (i = 0; i < get_status_totalcount(); i++) {
  805. ix = SHRT2LONG(i);
  806. qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
  807. }
  808. return qdepth;
  809. }
  810. static inline unsigned int *
  811. get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
  812. {
  813. int i, ix;
  814. memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
  815. for (i = 0; i < get_status_totalcount(); i++) {
  816. ix = SHRT2LONG(i);
  817. reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
  818. }
  819. return reqcnt;
  820. }
  821. static inline void
  822. init_work_element(struct work_element *we_p,
  823. struct priv_data *priv_data, pid_t pid)
  824. {
  825. int step;
  826. we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
  827. /* Come up with a unique id for this caller. */
  828. step = atomic_inc_return(&z90crypt_step);
  829. memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
  830. memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
  831. we_p->pid = pid;
  832. we_p->priv_data = priv_data;
  833. we_p->status[0] = STAT_DEFAULT;
  834. we_p->audit[0] = 0x00;
  835. we_p->audit[1] = 0x00;
  836. we_p->audit[2] = 0x00;
  837. we_p->resp_buff_size = 0;
  838. we_p->retcode = 0;
  839. we_p->devindex = -1;
  840. we_p->devtype = -1;
  841. atomic_set(&we_p->alarmrung, 0);
  842. init_waitqueue_head(&we_p->waitq);
  843. INIT_LIST_HEAD(&(we_p->liste));
  844. }
  845. static inline int
  846. allocate_work_element(struct work_element **we_pp,
  847. struct priv_data *priv_data_p, pid_t pid)
  848. {
  849. struct work_element *we_p;
  850. we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
  851. if (!we_p)
  852. return -ENOMEM;
  853. init_work_element(we_p, priv_data_p, pid);
  854. *we_pp = we_p;
  855. return 0;
  856. }
  857. static inline void
  858. remove_device(struct device *device_p)
  859. {
  860. if (!device_p || (device_p->disabled != 0))
  861. return;
  862. device_p->disabled = 1;
  863. z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
  864. z90crypt.hdware_info->hdware_mask.disabled_count++;
  865. }
  866. /**
  867. * Bitlength limits for each card
  868. *
  869. * There are new MCLs which allow more bitlengths. See the table for details.
  870. * The MCL must be applied and the newer bitlengths enabled for these to work.
  871. *
  872. * Card Type Old limit New limit
  873. * PCICA ??-2048 same (the lower limit is less than 128 bit...)
  874. * PCICC 512-1024 512-2048
  875. * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
  876. * PCIXCC_MCL3 ----- 128-2048
  877. * CEX2C 512-2048 128-2048
  878. *
  879. * ext_bitlens (extended bitlengths) is a global, since you should not apply an
  880. * MCL to just one card in a machine. We assume, at first, that all cards have
  881. * these capabilities.
  882. */
  883. int ext_bitlens = 1; // This is global
  884. #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
  885. #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
  886. #define PCICC_MIN_MOD_SIZE 64 // 512 bits
  887. #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
  888. #define MAX_MOD_SIZE 256 // 2048 bits
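/*
 * Illustrative sketch (not part of the original source): the limits above
 * are modulus sizes in bytes, i.e. bit length / 8.
 *
 *    128 bits ->  16 bytes == PCIXCC_MIN_MOD_SIZE
 *    512 bits ->  64 bytes == OLD_PCIXCC_MIN_MOD_SIZE == PCICC_MIN_MOD_SIZE
 *   1024 bits -> 128 bytes == OLD_PCICC_MAX_MOD_SIZE
 *   2048 bits -> 256 bytes == MAX_MOD_SIZE
 *
 * select_device_type() below compares the request's inputdatalength (a
 * byte count) against these values when routing work to a card type.
 */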
  889. static inline int
  890. select_device_type(int *dev_type_p, int bytelength)
  891. {
  892. static int count = 0;
  893. int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
  894. struct status *stat;
  895. if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
  896. (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
  897. (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
  898. return -1;
  899. if (*dev_type_p != ANYDEV) {
  900. stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
  901. if (stat->st_count >
  902. (stat->disabled_count + stat->user_disabled_count))
  903. return 0;
  904. return -1;
  905. }
  906. /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
  907. stat = &z90crypt.hdware_info->type_mask[PCICA];
  908. PCICA_avail = stat->st_count -
  909. (stat->disabled_count + stat->user_disabled_count);
  910. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
  911. PCIXCC_MCL3_avail = stat->st_count -
  912. (stat->disabled_count + stat->user_disabled_count);
  913. stat = &z90crypt.hdware_info->type_mask[CEX2C];
  914. CEX2C_avail = stat->st_count -
  915. (stat->disabled_count + stat->user_disabled_count);
  916. if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
  917. /**
  918. * bitlength is a factor, PCICA is the most capable, even with
  919. * the new MCL for PCIXCC.
  920. */
  921. if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
  922. (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
  923. if (!PCICA_avail)
  924. return -1;
  925. else {
  926. *dev_type_p = PCICA;
  927. return 0;
  928. }
  929. }
  930. index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
  931. CEX2C_avail);
  932. if (index_to_use < PCICA_avail)
  933. *dev_type_p = PCICA;
  934. else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
  935. *dev_type_p = PCIXCC_MCL3;
  936. else
  937. *dev_type_p = CEX2C;
  938. count++;
  939. return 0;
  940. }
  941. /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
  942. if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
  943. return -1;
  944. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
  945. if (stat->st_count >
  946. (stat->disabled_count + stat->user_disabled_count)) {
  947. *dev_type_p = PCIXCC_MCL2;
  948. return 0;
  949. }
  950. /**
  951. * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
  952. * (if we don't have the MCL applied and the newer bitlengths enabled)
  953. * cannot go to a PCICC
  954. */
  955. if ((bytelength < PCICC_MIN_MOD_SIZE) ||
  956. (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
  957. return -1;
  958. }
  959. stat = &z90crypt.hdware_info->type_mask[PCICC];
  960. if (stat->st_count >
  961. (stat->disabled_count + stat->user_disabled_count)) {
  962. *dev_type_p = PCICC;
  963. return 0;
  964. }
  965. return -1;
  966. }
  967. /**
  968. * Try the selected number, then the selected type (can be ANYDEV)
  969. */
  970. static inline int
  971. select_device(int *dev_type_p, int *device_nr_p, int bytelength)
  972. {
  973. int i, indx, devTp, low_count, low_indx;
  974. struct device_x *index_p;
  975. struct device *dev_ptr;
  976. PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
  977. if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
  978. PDEBUG("trying index = %d\n", *device_nr_p);
  979. dev_ptr = z90crypt.device_p[*device_nr_p];
  980. if (dev_ptr &&
  981. (dev_ptr->dev_stat != DEV_GONE) &&
  982. (dev_ptr->disabled == 0) &&
  983. (dev_ptr->user_disabled == 0)) {
  984. PDEBUG("selected by number, index = %d\n",
  985. *device_nr_p);
  986. *dev_type_p = dev_ptr->dev_type;
  987. return *device_nr_p;
  988. }
  989. }
  990. *device_nr_p = -1;
  991. PDEBUG("trying type = %d\n", *dev_type_p);
  992. devTp = *dev_type_p;
  993. if (select_device_type(&devTp, bytelength) == -1) {
  994. PDEBUG("failed to select by type\n");
  995. return -1;
  996. }
  997. PDEBUG("selected type = %d\n", devTp);
  998. index_p = &z90crypt.hdware_info->type_x_addr[devTp];
  999. low_count = 0x0000FFFF;
  1000. low_indx = -1;
  1001. for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
  1002. indx = index_p->device_index[i];
  1003. dev_ptr = z90crypt.device_p[indx];
  1004. if (dev_ptr &&
  1005. (dev_ptr->dev_stat != DEV_GONE) &&
  1006. (dev_ptr->disabled == 0) &&
  1007. (dev_ptr->user_disabled == 0) &&
  1008. (devTp == dev_ptr->dev_type) &&
  1009. (low_count > dev_ptr->dev_caller_count)) {
  1010. low_count = dev_ptr->dev_caller_count;
  1011. low_indx = indx;
  1012. }
  1013. }
  1014. *device_nr_p = low_indx;
  1015. return low_indx;
  1016. }
  1017. static inline int
  1018. send_to_crypto_device(struct work_element *we_p)
  1019. {
  1020. struct caller *caller_p;
  1021. struct device *device_p;
  1022. int dev_nr;
  1023. int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
  1024. if (!we_p->requestptr)
  1025. return SEN_FATAL_ERROR;
  1026. caller_p = (struct caller *)we_p->requestptr;
  1027. dev_nr = we_p->devindex;
  1028. if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
  1029. if (z90crypt.hdware_info->hdware_mask.st_count != 0)
  1030. return SEN_RETRY;
  1031. else
  1032. return SEN_NOT_AVAIL;
  1033. }
  1034. we_p->devindex = dev_nr;
  1035. device_p = z90crypt.device_p[dev_nr];
  1036. if (!device_p)
  1037. return SEN_NOT_AVAIL;
  1038. if (device_p->dev_type != we_p->devtype)
  1039. return SEN_RETRY;
  1040. if (device_p->dev_caller_count >= device_p->dev_q_depth)
  1041. return SEN_QUEUE_FULL;
  1042. PDEBUG("device number prior to send: %d\n", dev_nr);
  1043. switch (send_to_AP(dev_nr, z90crypt.cdx,
  1044. caller_p->caller_dev_dep_req_l,
  1045. caller_p->caller_dev_dep_req_p)) {
  1046. case DEV_SEN_EXCEPTION:
  1047. PRINTKC("Exception during send to device %d\n", dev_nr);
  1048. z90crypt.terminating = 1;
  1049. return SEN_FATAL_ERROR;
  1050. case DEV_GONE:
  1051. PRINTK("Device %d not available\n", dev_nr);
  1052. remove_device(device_p);
  1053. return SEN_NOT_AVAIL;
  1054. case DEV_EMPTY:
  1055. return SEN_NOT_AVAIL;
  1056. case DEV_NO_WORK:
  1057. return SEN_FATAL_ERROR;
  1058. case DEV_BAD_MESSAGE:
  1059. return SEN_USER_ERROR;
  1060. case DEV_QUEUE_FULL:
  1061. return SEN_QUEUE_FULL;
  1062. default:
  1063. case DEV_ONLINE:
  1064. break;
  1065. }
  1066. list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
  1067. device_p->dev_caller_count++;
  1068. return 0;
  1069. }
  1070. /**
  1071. * Send puts the user's work on one of two queues:
  1072. * the pending queue if the send was successful
  1073. * the request queue if the send failed because the device was full or busy
  1074. */
  1075. static inline int
  1076. z90crypt_send(struct work_element *we_p, const char *buf)
  1077. {
  1078. int rv;
  1079. PDEBUG("PID %d\n", PID());
  1080. if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
  1081. PDEBUG("PID %d tried to send more work but has outstanding "
  1082. "work.\n", PID());
  1083. return -EWORKPEND;
  1084. }
  1085. we_p->devindex = -1; // Reset device number
  1086. spin_lock_irq(&queuespinlock);
  1087. rv = send_to_crypto_device(we_p);
  1088. switch (rv) {
  1089. case 0:
  1090. we_p->requestsent = jiffies;
  1091. we_p->audit[0] |= FP_SENT;
  1092. list_add_tail(&we_p->liste, &pending_list);
  1093. ++pendingq_count;
  1094. we_p->audit[0] |= FP_PENDING;
  1095. break;
  1096. case SEN_BUSY:
  1097. case SEN_QUEUE_FULL:
  1098. rv = 0;
  1099. we_p->devindex = -1; // any device will do
  1100. we_p->requestsent = jiffies;
  1101. list_add_tail(&we_p->liste, &request_list);
  1102. ++requestq_count;
  1103. we_p->audit[0] |= FP_REQUEST;
  1104. break;
  1105. case SEN_RETRY:
  1106. rv = -ERESTARTSYS;
  1107. break;
  1108. case SEN_NOT_AVAIL:
  1109. PRINTK("*** No devices available.\n");
  1110. rv = we_p->retcode = -ENODEV;
  1111. we_p->status[0] |= STAT_FAILED;
  1112. break;
  1113. case REC_OPERAND_INV:
  1114. case REC_OPERAND_SIZE:
  1115. case REC_EVEN_MOD:
  1116. case REC_INVALID_PAD:
  1117. rv = we_p->retcode = -EINVAL;
  1118. we_p->status[0] |= STAT_FAILED;
  1119. break;
  1120. default:
  1121. we_p->retcode = rv;
  1122. we_p->status[0] |= STAT_FAILED;
  1123. break;
  1124. }
  1125. if (rv != -ERESTARTSYS)
  1126. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1127. spin_unlock_irq(&queuespinlock);
  1128. if (rv == 0)
  1129. tasklet_schedule(&reader_tasklet);
  1130. return rv;
  1131. }
  1132. /**
  1133. * z90crypt_process_results copies the completed work from kernel space back to the user's buffers.
  1134. */
  1135. static inline int
  1136. z90crypt_process_results(struct work_element *we_p, char __user *buf)
  1137. {
  1138. int rv;
  1139. PDEBUG("we_p %p (PID %d)\n", we_p, PID());
  1140. LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
  1141. SET_RDWRMASK(we_p->status[0], STAT_READPEND);
  1142. rv = 0;
  1143. if (!we_p->buffer) {
  1144. PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
  1145. we_p, PID());
  1146. rv = -ENOBUFF;
  1147. }
  1148. if (!rv)
  1149. if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
  1150. PDEBUG("copy_to_user failed: rv = %d\n", rv);
  1151. rv = -EFAULT;
  1152. }
  1153. if (!rv)
  1154. rv = we_p->retcode;
  1155. if (!rv)
  1156. if (we_p->resp_buff_size
  1157. && copy_to_user(we_p->resp_addr, we_p->resp_buff,
  1158. we_p->resp_buff_size))
  1159. rv = -EFAULT;
  1160. SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
  1161. return rv;
  1162. }
  1163. static unsigned char NULL_psmid[8] =
  1164. {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  1165. /**
  1166. * Used in device configuration functions
  1167. */
  1168. #define MAX_RESET 90
  1169. /**
  1170. * This is used only for PCICC support
  1171. */
  1172. static inline int
  1173. is_PKCS11_padded(unsigned char *buffer, int length)
  1174. {
  1175. int i;
  1176. if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
  1177. return 0;
  1178. for (i = 2; i < length; i++)
  1179. if (buffer[i] != 0xFF)
  1180. break;
  1181. if ((i < 10) || (i == length))
  1182. return 0;
  1183. if (buffer[i] != 0x00)
  1184. return 0;
  1185. return 1;
  1186. }
  1187. /**
  1188. * This is used only for PCICC support
  1189. */
  1190. static inline int
  1191. is_PKCS12_padded(unsigned char *buffer, int length)
  1192. {
  1193. int i;
  1194. if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
  1195. return 0;
  1196. for (i = 2; i < length; i++)
  1197. if (buffer[i] == 0x00)
  1198. break;
  1199. if ((i < 10) || (i == length))
  1200. return 0;
  1201. if (buffer[i] != 0x00)
  1202. return 0;
  1203. return 1;
  1204. }
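/*
 * Illustrative sketch (not part of the original source, compiled out):
 * minimal buffers the two padding checks above accept. Despite their names,
 * they test PKCS #1 block type 1 (0x00 0x01, at least eight 0xFF bytes,
 * 0x00 separator) and block type 2 (0x00 0x02, at least eight nonzero
 * bytes, 0x00 separator). The payload bytes are arbitrary placeholders.
 */
#if 0
static const unsigned char blocktype1_example[] = {
	0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x00, 0xDE, 0xAD, 0xBE, 0xEF
};	/* is_PKCS11_padded(blocktype1_example, 15) returns 1 */

static const unsigned char blocktype2_example[] = {
	0x00, 0x02, 0x5A, 0x3C, 0x91, 0x7B, 0x44, 0x6E, 0x12, 0x88,
	0x00, 0xDE, 0xAD, 0xBE, 0xEF
};	/* is_PKCS12_padded(blocktype2_example, 15) returns 1 */
#endif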
  1205. /**
  1206. * builds struct caller and converts message from generic format to
  1207. * device-dependent format
  1208. * func is ICARSAMODEXPO or ICARSACRT
  1209. * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
  1210. */
  1211. static inline int
  1212. build_caller(struct work_element *we_p, short function)
  1213. {
  1214. int rv;
  1215. struct caller *caller_p = (struct caller *)we_p->requestptr;
  1216. if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
  1217. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1218. (we_p->devtype != CEX2C))
  1219. return SEN_NOT_AVAIL;
  1220. memcpy(caller_p->caller_id, we_p->caller_id,
  1221. sizeof(caller_p->caller_id));
  1222. caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
  1223. caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
  1224. caller_p->caller_buf_p = we_p->buffer;
  1225. INIT_LIST_HEAD(&(caller_p->caller_liste));
  1226. rv = convert_request(we_p->buffer, we_p->funccode, function,
  1227. z90crypt.cdx, we_p->devtype,
  1228. &caller_p->caller_dev_dep_req_l,
  1229. caller_p->caller_dev_dep_req_p);
  1230. if (rv) {
  1231. if (rv == SEN_NOT_AVAIL)
  1232. PDEBUG("request can't be processed on hdwr avail\n");
  1233. else
  1234. PRINTK("Error from convert_request: %d\n", rv);
  1235. }
  1236. else
  1237. memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
  1238. return rv;
  1239. }
  1240. static inline void
  1241. unbuild_caller(struct device *device_p, struct caller *caller_p)
  1242. {
  1243. if (!caller_p)
  1244. return;
  1245. if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
  1246. if (!list_empty(&caller_p->caller_liste)) {
  1247. list_del_init(&caller_p->caller_liste);
  1248. device_p->dev_caller_count--;
  1249. }
  1250. memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
  1251. }
  1252. static inline int
  1253. get_crypto_request_buffer(struct work_element *we_p)
  1254. {
  1255. struct ica_rsa_modexpo *mex_p;
  1256. struct ica_rsa_modexpo_crt *crt_p;
  1257. unsigned char *temp_buffer;
  1258. short function;
  1259. int rv;
  1260. mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
  1261. crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
  1262. PDEBUG("device type input = %d\n", we_p->devtype);
  1263. if (z90crypt.terminating)
  1264. return REC_NO_RESPONSE;
  1265. if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
  1266. PRINTK("psmid zeroes\n");
  1267. return SEN_FATAL_ERROR;
  1268. }
  1269. if (!we_p->buffer) {
  1270. PRINTK("buffer pointer NULL\n");
  1271. return SEN_USER_ERROR;
  1272. }
  1273. if (!we_p->requestptr) {
  1274. PRINTK("caller pointer NULL\n");
  1275. return SEN_USER_ERROR;
  1276. }
  1277. if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
  1278. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1279. (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
  1280. PRINTK("invalid device type\n");
  1281. return SEN_USER_ERROR;
  1282. }
  1283. if ((mex_p->inputdatalength < 1) ||
  1284. (mex_p->inputdatalength > MAX_MOD_SIZE)) {
  1285. PRINTK("inputdatalength[%d] is not valid\n",
  1286. mex_p->inputdatalength);
  1287. return SEN_USER_ERROR;
  1288. }
  1289. if (mex_p->outputdatalength < mex_p->inputdatalength) {
  1290. PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
  1291. mex_p->outputdatalength, mex_p->inputdatalength);
  1292. return SEN_USER_ERROR;
  1293. }
	if (!mex_p->inputdata || !mex_p->outputdata) {
		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
		       mex_p->inputdata, mex_p->outputdata);
		return SEN_USER_ERROR;
	}
  1299. /**
  1300. * As long as outputdatalength is big enough, we can set the
  1301. * outputdatalength equal to the inputdatalength, since that is the
  1302. * number of bytes we will copy in any case
  1303. */
  1304. mex_p->outputdatalength = mex_p->inputdatalength;
  1305. rv = 0;
  1306. switch (we_p->funccode) {
  1307. case ICARSAMODEXPO:
  1308. if (!mex_p->b_key || !mex_p->n_modulus)
  1309. rv = SEN_USER_ERROR;
  1310. break;
  1311. case ICARSACRT:
  1312. if (!IS_EVEN(crt_p->inputdatalength)) {
  1313. PRINTK("inputdatalength[%d] is odd, CRT form\n",
  1314. crt_p->inputdatalength);
  1315. rv = SEN_USER_ERROR;
  1316. break;
  1317. }
  1318. if (!crt_p->bp_key ||
  1319. !crt_p->bq_key ||
  1320. !crt_p->np_prime ||
  1321. !crt_p->nq_prime ||
  1322. !crt_p->u_mult_inv) {
  1323. PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
  1324. crt_p->bp_key, crt_p->bq_key,
  1325. crt_p->np_prime, crt_p->nq_prime,
  1326. crt_p->u_mult_inv);
  1327. rv = SEN_USER_ERROR;
  1328. }
  1329. break;
  1330. default:
  1331. PRINTK("bad func = %d\n", we_p->funccode);
  1332. rv = SEN_USER_ERROR;
  1333. break;
  1334. }
  1335. if (rv != 0)
  1336. return rv;
  1337. if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
  1338. return SEN_NOT_AVAIL;
  1339. temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
  1340. sizeof(struct caller);
  1341. if (copy_from_user(temp_buffer, mex_p->inputdata,
  1342. mex_p->inputdatalength) != 0)
  1343. return SEN_RELEASED;
  1344. function = PCI_FUNC_KEY_ENCRYPT;
  1345. switch (we_p->devtype) {
  1346. /* PCICA does everything with a simple RSA mod-expo operation */
  1347. case PCICA:
  1348. function = PCI_FUNC_KEY_ENCRYPT;
  1349. break;
  1350. /**
  1351. * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
  1352. * operation, and all CRT forms with a PKCS-1.2 format decrypt.
  1353. * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
  1354. * mod-expo operation
  1355. */
  1356. case PCIXCC_MCL2:
  1357. if (we_p->funccode == ICARSAMODEXPO)
  1358. function = PCI_FUNC_KEY_ENCRYPT;
  1359. else
  1360. function = PCI_FUNC_KEY_DECRYPT;
  1361. break;
  1362. case PCIXCC_MCL3:
  1363. case CEX2C:
  1364. if (we_p->funccode == ICARSAMODEXPO)
  1365. function = PCI_FUNC_KEY_ENCRYPT;
  1366. else
  1367. function = PCI_FUNC_KEY_DECRYPT;
  1368. break;
  1369. /**
  1370. * PCICC does everything as a PKCS-1.2 format request
  1371. */
  1372. case PCICC:
		/* PCICC cannot handle input that is PKCS#1.1 padded */
		if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength))
			return SEN_NOT_AVAIL;
  1377. if (we_p->funccode == ICARSAMODEXPO) {
  1378. if (is_PKCS12_padded(temp_buffer,
  1379. mex_p->inputdatalength))
  1380. function = PCI_FUNC_KEY_ENCRYPT;
  1381. else
  1382. function = PCI_FUNC_KEY_DECRYPT;
  1383. } else
  1384. /* all CRT forms are decrypts */
  1385. function = PCI_FUNC_KEY_DECRYPT;
  1386. break;
  1387. }
  1388. PDEBUG("function: %04x\n", function);
  1389. rv = build_caller(we_p, function);
  1390. PDEBUG("rv from build_caller = %d\n", rv);
  1391. return rv;
  1392. }
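/*
 * Layout assumed by the temp_buffer arithmetic above (an illustrative
 * sketch inferred from the offsets used here, not a definition from
 * this file): each request lives in a single page that begins with the
 * struct work_element, followed by the struct caller addressed by
 * we_p->requestptr, followed by the copy of the caller's input data:
 *
 *	+--------------------------+  <- we_p (later freed via free_page())
 *	| struct work_element      |
 *	+--------------------------+  <- we_p->requestptr
 *	| struct caller            |
 *	+--------------------------+  <- temp_buffer
 *	| copied input data        |
 *	+--------------------------+
 */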
  1393. static inline int
  1394. z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
  1395. const char __user *buffer)
  1396. {
  1397. int rv;
  1398. we_p->devindex = -1;
  1399. if (funccode == ICARSAMODEXPO)
  1400. we_p->buff_size = sizeof(struct ica_rsa_modexpo);
  1401. else
  1402. we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
  1403. if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
  1404. return -EFAULT;
  1405. we_p->audit[0] |= FP_COPYFROM;
  1406. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1407. we_p->funccode = funccode;
  1408. we_p->devtype = -1;
  1409. we_p->audit[0] |= FP_BUFFREQ;
  1410. rv = get_crypto_request_buffer(we_p);
  1411. switch (rv) {
  1412. case 0:
  1413. we_p->audit[0] |= FP_BUFFGOT;
  1414. break;
  1415. case SEN_USER_ERROR:
  1416. rv = -EINVAL;
  1417. break;
  1418. case SEN_QUEUE_FULL:
  1419. rv = 0;
  1420. break;
  1421. case SEN_RELEASED:
  1422. rv = -EFAULT;
  1423. break;
  1424. case REC_NO_RESPONSE:
  1425. rv = -ENODEV;
  1426. break;
  1427. case SEN_NOT_AVAIL:
  1428. case EGETBUFF:
  1429. rv = -EGETBUFF;
  1430. break;
  1431. default:
  1432. PRINTK("rv = %d\n", rv);
  1433. rv = -EGETBUFF;
  1434. break;
  1435. }
  1436. if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
  1437. SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
  1438. return rv;
  1439. }
  1440. static inline void
  1441. purge_work_element(struct work_element *we_p)
  1442. {
  1443. struct list_head *lptr;
  1444. spin_lock_irq(&queuespinlock);
  1445. list_for_each(lptr, &request_list) {
  1446. if (lptr == &we_p->liste) {
  1447. list_del_init(lptr);
  1448. requestq_count--;
  1449. break;
  1450. }
  1451. }
  1452. list_for_each(lptr, &pending_list) {
  1453. if (lptr == &we_p->liste) {
  1454. list_del_init(lptr);
  1455. pendingq_count--;
  1456. break;
  1457. }
  1458. }
  1459. spin_unlock_irq(&queuespinlock);
  1460. }
  1461. /**
  1462. * Build the request and send it.
  1463. */
  1464. static inline int
  1465. z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
  1466. unsigned int cmd, unsigned long arg)
  1467. {
  1468. struct work_element *we_p;
  1469. int rv;
  1470. if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
  1471. PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
  1472. return rv;
  1473. }
  1474. if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
  1475. PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
  1476. if (!rv)
  1477. if ((rv = z90crypt_send(we_p, (const char *)arg)))
  1478. PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
  1479. if (!rv) {
  1480. we_p->audit[0] |= FP_ASLEEP;
  1481. wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
  1482. we_p->audit[0] |= FP_AWAKE;
  1483. rv = we_p->retcode;
  1484. }
  1485. if (!rv)
  1486. rv = z90crypt_process_results(we_p, (char __user *)arg);
  1487. if ((we_p->status[0] & STAT_FAILED)) {
  1488. switch (rv) {
  1489. /**
  1490. * EINVAL *after* receive is almost always a padding error or
  1491. * length error issued by a coprocessor (not an accelerator).
  1492. * We convert this return value to -EGETBUFF which should
  1493. * trigger a fallback to software.
  1494. */
  1495. case -EINVAL:
  1496. if (we_p->devtype != PCICA)
  1497. rv = -EGETBUFF;
  1498. break;
  1499. case -ETIMEOUT:
  1500. if (z90crypt.mask.st_count > 0)
  1501. rv = -ERESTARTSYS; // retry with another
  1502. else
  1503. rv = -ENODEV; // no cards left
  1504. /* fall through to clean up request queue */
  1505. case -ERESTARTSYS:
  1506. case -ERELEASED:
  1507. switch (CHK_RDWRMASK(we_p->status[0])) {
  1508. case STAT_WRITTEN:
  1509. purge_work_element(we_p);
  1510. break;
  1511. case STAT_READPEND:
  1512. case STAT_NOWORK:
  1513. default:
  1514. break;
  1515. }
  1516. break;
  1517. default:
  1518. we_p->status[0] ^= STAT_FAILED;
  1519. break;
  1520. }
  1521. }
  1522. free_page((long)we_p);
  1523. return rv;
  1524. }
  1525. /**
  1526. * This function is a little long, but it's really just one large switch
  1527. * statement.
  1528. */
  1529. static long
  1530. z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1531. {
  1532. struct priv_data *private_data_p = filp->private_data;
  1533. unsigned char *status;
  1534. unsigned char *qdepth;
  1535. unsigned int *reqcnt;
  1536. struct ica_z90_status *pstat;
  1537. int ret, i, loopLim, tempstat;
  1538. static int deprecated_msg_count1 = 0;
  1539. static int deprecated_msg_count2 = 0;
  1540. PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
  1541. PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
  1542. cmd,
  1543. !_IOC_DIR(cmd) ? "NO"
  1544. : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
  1545. : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
  1546. : "WR")),
  1547. _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
  1548. if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
  1549. PRINTK("cmd 0x%08X contains bad magic\n", cmd);
  1550. return -ENOTTY;
  1551. }
  1552. ret = 0;
  1553. switch (cmd) {
  1554. case ICARSAMODEXPO:
  1555. case ICARSACRT:
  1556. if (quiesce_z90crypt) {
  1557. ret = -EQUIESCE;
  1558. break;
  1559. }
  1560. ret = -ENODEV; // Default if no devices
  1561. loopLim = z90crypt.hdware_info->hdware_mask.st_count -
  1562. (z90crypt.hdware_info->hdware_mask.disabled_count +
  1563. z90crypt.hdware_info->hdware_mask.user_disabled_count);
  1564. for (i = 0; i < loopLim; i++) {
  1565. ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
  1566. if (ret != -ERESTARTSYS)
  1567. break;
  1568. }
  1569. if (ret == -ERESTARTSYS)
  1570. ret = -ENODEV;
  1571. break;
  1572. case Z90STAT_TOTALCOUNT:
  1573. tempstat = get_status_totalcount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1575. ret = -EFAULT;
  1576. break;
  1577. case Z90STAT_PCICACOUNT:
  1578. tempstat = get_status_PCICAcount();
  1579. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1580. ret = -EFAULT;
  1581. break;
  1582. case Z90STAT_PCICCCOUNT:
  1583. tempstat = get_status_PCICCcount();
  1584. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1585. ret = -EFAULT;
  1586. break;
  1587. case Z90STAT_PCIXCCMCL2COUNT:
  1588. tempstat = get_status_PCIXCCMCL2count();
  1589. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1590. ret = -EFAULT;
  1591. break;
  1592. case Z90STAT_PCIXCCMCL3COUNT:
  1593. tempstat = get_status_PCIXCCMCL3count();
  1594. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1595. ret = -EFAULT;
  1596. break;
  1597. case Z90STAT_CEX2CCOUNT:
  1598. tempstat = get_status_CEX2Ccount();
  1599. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1600. ret = -EFAULT;
  1601. break;
  1602. case Z90STAT_REQUESTQ_COUNT:
  1603. tempstat = get_status_requestq_count();
  1604. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1605. ret = -EFAULT;
  1606. break;
  1607. case Z90STAT_PENDINGQ_COUNT:
  1608. tempstat = get_status_pendingq_count();
  1609. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1610. ret = -EFAULT;
  1611. break;
  1612. case Z90STAT_TOTALOPEN_COUNT:
  1613. tempstat = get_status_totalopen_count();
  1614. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1615. ret = -EFAULT;
  1616. break;
  1617. case Z90STAT_DOMAIN_INDEX:
  1618. tempstat = get_status_domain_index();
  1619. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1620. ret = -EFAULT;
  1621. break;
  1622. case Z90STAT_STATUS_MASK:
  1623. status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1624. if (!status) {
  1625. PRINTK("kmalloc for status failed!\n");
  1626. ret = -ENOMEM;
  1627. break;
  1628. }
  1629. get_status_status_mask(status);
  1630. if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
  1631. != 0)
  1632. ret = -EFAULT;
  1633. kfree(status);
  1634. break;
  1635. case Z90STAT_QDEPTH_MASK:
  1636. qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1637. if (!qdepth) {
  1638. PRINTK("kmalloc for qdepth failed!\n");
  1639. ret = -ENOMEM;
  1640. break;
  1641. }
  1642. get_status_qdepth_mask(qdepth);
  1643. if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
  1644. ret = -EFAULT;
  1645. kfree(qdepth);
  1646. break;
  1647. case Z90STAT_PERDEV_REQCNT:
  1648. reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
  1649. if (!reqcnt) {
  1650. PRINTK("kmalloc for reqcnt failed!\n");
  1651. ret = -ENOMEM;
  1652. break;
  1653. }
  1654. get_status_perdevice_reqcnt(reqcnt);
  1655. if (copy_to_user((char __user *) arg, reqcnt,
  1656. Z90CRYPT_NUM_APS * sizeof(int)) != 0)
  1657. ret = -EFAULT;
  1658. kfree(reqcnt);
  1659. break;
  1660. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1661. case ICAZ90STATUS:
  1662. if (deprecated_msg_count1 < 20) {
  1663. PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
  1664. deprecated_msg_count1++;
  1665. if (deprecated_msg_count1 == 20)
  1666. PRINTK("No longer issuing messages related to "
  1667. "deprecated call to ICAZ90STATUS.\n");
  1668. }
  1669. pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
  1670. if (!pstat) {
  1671. PRINTK("kmalloc for pstat failed!\n");
  1672. ret = -ENOMEM;
  1673. break;
  1674. }
  1675. pstat->totalcount = get_status_totalcount();
  1676. pstat->leedslitecount = get_status_PCICAcount();
  1677. pstat->leeds2count = get_status_PCICCcount();
  1678. pstat->requestqWaitCount = get_status_requestq_count();
  1679. pstat->pendingqWaitCount = get_status_pendingq_count();
  1680. pstat->totalOpenCount = get_status_totalopen_count();
  1681. pstat->cryptoDomain = get_status_domain_index();
  1682. get_status_status_mask(pstat->status);
  1683. get_status_qdepth_mask(pstat->qdepth);
  1684. if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
  1685. sizeof(struct ica_z90_status)) != 0)
  1686. ret = -EFAULT;
  1687. kfree(pstat);
  1688. break;
  1689. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1690. case Z90STAT_PCIXCCCOUNT:
  1691. if (deprecated_msg_count2 < 20) {
  1692. PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
  1693. deprecated_msg_count2++;
  1694. if (deprecated_msg_count2 == 20)
  1695. PRINTK("No longer issuing messages about depre"
  1696. "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
  1697. }
  1698. tempstat = get_status_PCIXCCcount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1700. ret = -EFAULT;
  1701. break;
  1702. case Z90QUIESCE:
  1703. if (current->euid != 0) {
  1704. PRINTK("QUIESCE fails: euid %d\n",
  1705. current->euid);
  1706. ret = -EACCES;
  1707. } else {
  1708. PRINTK("QUIESCE device from PID %d\n", PID());
  1709. quiesce_z90crypt = 1;
  1710. }
  1711. break;
  1712. default:
  1713. /* user passed an invalid IOCTL number */
  1714. PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
  1715. ret = -ENOTTY;
  1716. break;
  1717. }
  1718. return ret;
  1719. }
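/*
 * A minimal user-space sketch of driving the ICARSAMODEXPO path handled
 * above; the buffers (in, out, exponent, modulus) and their length
 * modlen stand in for the caller's own data, the /dev/z90crypt node
 * name is an assumption, and error handling is omitted:
 *
 *	struct ica_rsa_modexpo mex;
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *
 *	mex.inputdata = in;		// modulus-sized input block
 *	mex.inputdatalength = modlen;
 *	mex.outputdata = out;		// at least modlen bytes
 *	mex.outputdatalength = modlen;
 *	mex.b_key = exponent;
 *	mex.n_modulus = modulus;
 *	rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *	// on success (rc == 0), out holds mex.outputdatalength result bytes
 *	close(fd);
 */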
  1720. static inline int
  1721. sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1722. {
  1723. int hl, i;
  1724. hl = 0;
  1725. for (i = 0; i < len; i++)
  1726. hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
  1727. hl += sprintf(outaddr+hl, " ");
  1728. return hl;
  1729. }
  1730. static inline int
  1731. sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1732. {
  1733. int hl, inl, c, cx;
  1734. hl = sprintf(outaddr, " ");
  1735. inl = 0;
  1736. for (c = 0; c < (len / 16); c++) {
  1737. hl += sprintcl(outaddr+hl, addr+inl, 16);
  1738. inl += 16;
  1739. }
  1740. cx = len%16;
  1741. if (cx) {
  1742. hl += sprintcl(outaddr+hl, addr+inl, cx);
  1743. inl += cx;
  1744. }
  1745. hl += sprintf(outaddr+hl, "\n");
  1746. return hl;
  1747. }
  1748. static inline int
  1749. sprinthx(unsigned char *title, unsigned char *outaddr,
  1750. unsigned char *addr, unsigned int len)
  1751. {
  1752. int hl, inl, r, rx;
  1753. hl = sprintf(outaddr, "\n%s\n", title);
  1754. inl = 0;
  1755. for (r = 0; r < (len / 64); r++) {
  1756. hl += sprintrw(outaddr+hl, addr+inl, 64);
  1757. inl += 64;
  1758. }
  1759. rx = len % 64;
  1760. if (rx) {
  1761. hl += sprintrw(outaddr+hl, addr+inl, rx);
  1762. inl += rx;
  1763. }
  1764. hl += sprintf(outaddr+hl, "\n");
  1765. return hl;
  1766. }
  1767. static inline int
  1768. sprinthx4(unsigned char *title, unsigned char *outaddr,
  1769. unsigned int *array, unsigned int len)
  1770. {
  1771. int hl, r;
  1772. hl = sprintf(outaddr, "\n%s\n", title);
  1773. for (r = 0; r < len; r++) {
  1774. if ((r % 8) == 0)
  1775. hl += sprintf(outaddr+hl, " ");
  1776. hl += sprintf(outaddr+hl, "%08X ", array[r]);
  1777. if ((r % 8) == 7)
  1778. hl += sprintf(outaddr+hl, "\n");
  1779. }
  1780. hl += sprintf(outaddr+hl, "\n");
  1781. return hl;
  1782. }
  1783. static int
  1784. z90crypt_status(char *resp_buff, char **start, off_t offset,
  1785. int count, int *eof, void *data)
  1786. {
  1787. unsigned char *workarea;
  1788. int len;
  1789. /* resp_buff is a page. Use the right half for a work area */
  1790. workarea = resp_buff+2000;
  1791. len = 0;
  1792. len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
  1793. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
  1794. len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
  1795. get_status_domain_index());
  1796. len += sprintf(resp_buff+len, "Total device count: %d\n",
  1797. get_status_totalcount());
  1798. len += sprintf(resp_buff+len, "PCICA count: %d\n",
  1799. get_status_PCICAcount());
  1800. len += sprintf(resp_buff+len, "PCICC count: %d\n",
  1801. get_status_PCICCcount());
  1802. len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
  1803. get_status_PCIXCCMCL2count());
  1804. len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
  1805. get_status_PCIXCCMCL3count());
  1806. len += sprintf(resp_buff+len, "CEX2C count: %d\n",
  1807. get_status_CEX2Ccount());
  1808. len += sprintf(resp_buff+len, "requestq count: %d\n",
  1809. get_status_requestq_count());
  1810. len += sprintf(resp_buff+len, "pendingq count: %d\n",
  1811. get_status_pendingq_count());
  1812. len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
  1813. get_status_totalopen_count());
  1814. len += sprinthx(
  1815. "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
  1816. "4: PCIXCC (MCL3), 5: CEX2C",
  1817. resp_buff+len,
  1818. get_status_status_mask(workarea),
  1819. Z90CRYPT_NUM_APS);
  1820. len += sprinthx("Waiting work element counts",
  1821. resp_buff+len,
  1822. get_status_qdepth_mask(workarea),
  1823. Z90CRYPT_NUM_APS);
  1824. len += sprinthx4(
  1825. "Per-device successfully completed request counts",
  1826. resp_buff+len,
  1827. get_status_perdevice_reqcnt((unsigned int *)workarea),
  1828. Z90CRYPT_NUM_APS);
  1829. *eof = 1;
  1830. memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
  1831. return len;
  1832. }
  1833. static inline void
  1834. disable_card(int card_index)
  1835. {
  1836. struct device *devp;
  1837. devp = LONG2DEVPTR(card_index);
  1838. if (!devp || devp->user_disabled)
  1839. return;
  1840. devp->user_disabled = 1;
  1841. z90crypt.hdware_info->hdware_mask.user_disabled_count++;
  1842. if (devp->dev_type == -1)
  1843. return;
  1844. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
  1845. }
  1846. static inline void
  1847. enable_card(int card_index)
  1848. {
  1849. struct device *devp;
  1850. devp = LONG2DEVPTR(card_index);
  1851. if (!devp || !devp->user_disabled)
  1852. return;
  1853. devp->user_disabled = 0;
  1854. z90crypt.hdware_info->hdware_mask.user_disabled_count--;
  1855. if (devp->dev_type == -1)
  1856. return;
  1857. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
  1858. }
  1859. static int
  1860. z90crypt_status_write(struct file *file, const char __user *buffer,
  1861. unsigned long count, void *data)
  1862. {
  1863. int j, eol;
  1864. unsigned char *lbuf, *ptr;
  1865. unsigned int local_count;
  1866. #define LBUFSIZE 1200
  1867. lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
  1868. if (!lbuf) {
  1869. PRINTK("kmalloc failed!\n");
  1870. return 0;
  1871. }
	if (count <= 0) {
		kfree(lbuf);
		return 0;
	}
  1874. local_count = UMIN((unsigned int)count, LBUFSIZE-1);
  1875. if (copy_from_user(lbuf, buffer, local_count) != 0) {
  1876. kfree(lbuf);
  1877. return -EFAULT;
  1878. }
  1879. lbuf[local_count] = '\0';
  1880. ptr = strstr(lbuf, "Online devices");
  1881. if (ptr == 0) {
  1882. PRINTK("Unable to parse data (missing \"Online devices\")\n");
  1883. kfree(lbuf);
  1884. return count;
  1885. }
  1886. ptr = strstr(ptr, "\n");
  1887. if (ptr == 0) {
  1888. PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
  1889. kfree(lbuf);
  1890. return count;
  1891. }
  1892. ptr++;
  1893. if (strstr(ptr, "Waiting work element counts") == NULL) {
  1894. PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
  1895. kfree(lbuf);
  1896. return count;
  1897. }
  1898. j = 0;
  1899. eol = 0;
  1900. while ((j < 64) && (*ptr != '\0')) {
  1901. switch (*ptr) {
  1902. case '\t':
  1903. case ' ':
  1904. break;
  1905. case '\n':
  1906. default:
  1907. eol = 1;
  1908. break;
  1909. case '0': // no device
  1910. case '1': // PCICA
  1911. case '2': // PCICC
  1912. case '3': // PCIXCC_MCL2
  1913. case '4': // PCIXCC_MCL3
  1914. case '5': // CEX2C
  1915. j++;
  1916. break;
  1917. case 'd':
  1918. case 'D':
  1919. disable_card(j);
  1920. j++;
  1921. break;
  1922. case 'e':
  1923. case 'E':
  1924. enable_card(j);
  1925. j++;
  1926. break;
  1927. }
  1928. if (eol)
  1929. break;
  1930. ptr++;
  1931. }
  1932. kfree(lbuf);
  1933. return count;
  1934. }
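/*
 * Usage sketch for the write interface parsed above (the proc path is
 * an assumption; the file is normally found under /proc/driver/z90crypt):
 * write back the same text that z90crypt_status() produces, with the
 * characters on the line after the "Online devices" header edited per
 * card position.  Blanks are skipped, the digits 0-5 leave a card
 * unchanged, 'd'/'D' disables it and 'e'/'E' re-enables it; anything
 * else ends the scan.  For example, changing the third non-blank
 * character to 'd' and writing the text back disables the card at
 * index 2.
 */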
  1935. /**
  1936. * Functions that run under a timer, with no process id
  1937. *
  1938. * The task functions:
  1939. * z90crypt_reader_task
  1940. * helper_send_work
  1941. * helper_handle_work_element
  1942. * helper_receive_rc
  1943. * z90crypt_config_task
  1944. * z90crypt_cleanup_task
  1945. *
  1946. * Helper functions:
  1947. * z90crypt_schedule_reader_timer
  1948. * z90crypt_schedule_reader_task
  1949. * z90crypt_schedule_config_task
  1950. * z90crypt_schedule_cleanup_task
  1951. */
  1952. static inline int
  1953. receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
  1954. unsigned char *buff, unsigned char __user **dest_p_p)
  1955. {
  1956. int dv, rv;
  1957. struct device *dev_ptr;
  1958. struct caller *caller_p;
  1959. struct ica_rsa_modexpo *icaMsg_p;
  1960. struct list_head *ptr, *tptr;
  1961. memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
  1962. if (z90crypt.terminating)
  1963. return REC_FATAL_ERROR;
  1964. caller_p = 0;
  1965. dev_ptr = z90crypt.device_p[index];
  1966. rv = 0;
  1967. do {
  1968. if (!dev_ptr || dev_ptr->disabled) {
  1969. rv = REC_NO_WORK; // a disabled device can't return work
  1970. break;
  1971. }
  1972. if (dev_ptr->dev_self_x != index) {
  1973. PRINTKC("Corrupt dev ptr\n");
  1974. z90crypt.terminating = 1;
  1975. rv = REC_FATAL_ERROR;
  1976. break;
  1977. }
  1978. if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
  1979. dv = DEV_REC_EXCEPTION;
  1980. PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
  1981. dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
  1982. } else {
  1983. PDEBUG("Dequeue called for device %d\n", index);
  1984. dv = receive_from_AP(index, z90crypt.cdx,
  1985. dev_ptr->dev_resp_l,
  1986. dev_ptr->dev_resp_p, psmid);
  1987. }
  1988. switch (dv) {
  1989. case DEV_REC_EXCEPTION:
  1990. rv = REC_FATAL_ERROR;
  1991. z90crypt.terminating = 1;
  1992. PRINTKC("Exception in receive from device %d\n",
  1993. index);
  1994. break;
  1995. case DEV_ONLINE:
  1996. rv = 0;
  1997. break;
  1998. case DEV_EMPTY:
  1999. rv = REC_EMPTY;
  2000. break;
  2001. case DEV_NO_WORK:
  2002. rv = REC_NO_WORK;
  2003. break;
  2004. case DEV_BAD_MESSAGE:
  2005. case DEV_GONE:
  2006. case REC_HARDWAR_ERR:
  2007. default:
  2008. rv = REC_NO_RESPONSE;
  2009. break;
  2010. }
  2011. if (rv)
  2012. break;
  2013. if (dev_ptr->dev_caller_count <= 0) {
  2014. rv = REC_USER_GONE;
  2015. break;
  2016. }
  2017. list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
  2018. caller_p = list_entry(ptr, struct caller, caller_liste);
  2019. if (!memcmp(caller_p->caller_id, psmid,
  2020. sizeof(caller_p->caller_id))) {
  2021. if (!list_empty(&caller_p->caller_liste)) {
  2022. list_del_init(ptr);
  2023. dev_ptr->dev_caller_count--;
  2024. break;
  2025. }
  2026. }
  2027. caller_p = 0;
  2028. }
  2029. if (!caller_p) {
  2030. PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
  2031. "%02X%02X%02X in device list\n",
  2032. psmid[0], psmid[1], psmid[2], psmid[3],
  2033. psmid[4], psmid[5], psmid[6], psmid[7]);
  2034. rv = REC_USER_GONE;
  2035. break;
  2036. }
  2037. PDEBUG("caller_p after successful receive: %p\n", caller_p);
  2038. rv = convert_response(dev_ptr->dev_resp_p,
  2039. caller_p->caller_buf_p, buff_len_p, buff);
  2040. switch (rv) {
  2041. case REC_USE_PCICA:
  2042. break;
  2043. case REC_OPERAND_INV:
  2044. case REC_OPERAND_SIZE:
  2045. case REC_EVEN_MOD:
  2046. case REC_INVALID_PAD:
  2047. PDEBUG("device %d: 'user error' %d\n", index, rv);
  2048. break;
  2049. case WRONG_DEVICE_TYPE:
  2050. case REC_HARDWAR_ERR:
  2051. case REC_BAD_MESSAGE:
  2052. PRINTKW("device %d: hardware error %d\n", index, rv);
  2053. rv = REC_NO_RESPONSE;
  2054. break;
  2055. default:
  2056. PDEBUG("device %d: rv = %d\n", index, rv);
  2057. break;
  2058. }
  2059. } while (0);
  2060. switch (rv) {
  2061. case 0:
  2062. PDEBUG("Successful receive from device %d\n", index);
  2063. icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
  2064. *dest_p_p = icaMsg_p->outputdata;
  2065. if (*buff_len_p == 0)
  2066. PRINTK("Zero *buff_len_p\n");
  2067. break;
  2068. case REC_NO_RESPONSE:
  2069. PRINTKW("Removing device %d from availability\n", index);
  2070. remove_device(dev_ptr);
  2071. break;
  2072. }
  2073. if (caller_p)
  2074. unbuild_caller(dev_ptr, caller_p);
  2075. return rv;
  2076. }
  2077. static inline void
  2078. helper_send_work(int index)
  2079. {
  2080. struct work_element *rq_p;
  2081. int rv;
  2082. if (list_empty(&request_list))
  2083. return;
  2084. requestq_count--;
  2085. rq_p = list_entry(request_list.next, struct work_element, liste);
  2086. list_del_init(&rq_p->liste);
  2087. rq_p->audit[1] |= FP_REMREQUEST;
  2088. if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
  2089. rq_p->devindex = SHRT2LONG(index);
  2090. rv = send_to_crypto_device(rq_p);
  2091. if (rv == 0) {
  2092. rq_p->requestsent = jiffies;
  2093. rq_p->audit[0] |= FP_SENT;
  2094. list_add_tail(&rq_p->liste, &pending_list);
  2095. ++pendingq_count;
  2096. rq_p->audit[0] |= FP_PENDING;
  2097. } else {
  2098. switch (rv) {
  2099. case REC_OPERAND_INV:
  2100. case REC_OPERAND_SIZE:
  2101. case REC_EVEN_MOD:
  2102. case REC_INVALID_PAD:
  2103. rq_p->retcode = -EINVAL;
  2104. break;
  2105. case SEN_NOT_AVAIL:
  2106. case SEN_RETRY:
  2107. case REC_NO_RESPONSE:
  2108. default:
  2109. if (z90crypt.mask.st_count > 1)
  2110. rq_p->retcode =
  2111. -ERESTARTSYS;
  2112. else
  2113. rq_p->retcode = -ENODEV;
  2114. break;
  2115. }
  2116. rq_p->status[0] |= STAT_FAILED;
  2117. rq_p->audit[1] |= FP_AWAKENING;
  2118. atomic_set(&rq_p->alarmrung, 1);
  2119. wake_up(&rq_p->waitq);
  2120. }
  2121. } else {
  2122. if (z90crypt.mask.st_count > 1)
  2123. rq_p->retcode = -ERESTARTSYS;
  2124. else
  2125. rq_p->retcode = -ENODEV;
  2126. rq_p->status[0] |= STAT_FAILED;
  2127. rq_p->audit[1] |= FP_AWAKENING;
  2128. atomic_set(&rq_p->alarmrung, 1);
  2129. wake_up(&rq_p->waitq);
  2130. }
  2131. }
  2132. static inline void
  2133. helper_handle_work_element(int index, unsigned char psmid[8], int rc,
  2134. int buff_len, unsigned char *buff,
  2135. unsigned char __user *resp_addr)
  2136. {
  2137. struct work_element *pq_p;
  2138. struct list_head *lptr, *tptr;
  2139. pq_p = 0;
  2140. list_for_each_safe(lptr, tptr, &pending_list) {
  2141. pq_p = list_entry(lptr, struct work_element, liste);
  2142. if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
  2143. list_del_init(lptr);
  2144. pendingq_count--;
  2145. pq_p->audit[1] |= FP_NOTPENDING;
  2146. break;
  2147. }
  2148. pq_p = 0;
  2149. }
  2150. if (!pq_p) {
  2151. PRINTK("device %d has work but no caller exists on pending Q\n",
  2152. SHRT2LONG(index));
  2153. return;
  2154. }
  2155. switch (rc) {
  2156. case 0:
  2157. pq_p->resp_buff_size = buff_len;
  2158. pq_p->audit[1] |= FP_RESPSIZESET;
  2159. if (buff_len) {
  2160. pq_p->resp_addr = resp_addr;
  2161. pq_p->audit[1] |= FP_RESPADDRCOPIED;
  2162. memcpy(pq_p->resp_buff, buff, buff_len);
  2163. pq_p->audit[1] |= FP_RESPBUFFCOPIED;
  2164. }
  2165. break;
  2166. case REC_OPERAND_INV:
  2167. case REC_OPERAND_SIZE:
  2168. case REC_EVEN_MOD:
  2169. case REC_INVALID_PAD:
  2170. PDEBUG("-EINVAL after application error %d\n", rc);
  2171. pq_p->retcode = -EINVAL;
  2172. pq_p->status[0] |= STAT_FAILED;
  2173. break;
  2174. case REC_USE_PCICA:
  2175. pq_p->retcode = -ERESTARTSYS;
  2176. pq_p->status[0] |= STAT_FAILED;
  2177. break;
  2178. case REC_NO_RESPONSE:
  2179. default:
  2180. if (z90crypt.mask.st_count > 1)
  2181. pq_p->retcode = -ERESTARTSYS;
  2182. else
  2183. pq_p->retcode = -ENODEV;
  2184. pq_p->status[0] |= STAT_FAILED;
  2185. break;
  2186. }
  2187. if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
  2188. pq_p->audit[1] |= FP_AWAKENING;
  2189. atomic_set(&pq_p->alarmrung, 1);
  2190. wake_up(&pq_p->waitq);
  2191. }
  2192. }
  2193. /**
  2194. * return TRUE if the work element should be removed from the queue
  2195. */
  2196. static inline int
  2197. helper_receive_rc(int index, int *rc_p)
  2198. {
  2199. switch (*rc_p) {
  2200. case 0:
  2201. case REC_OPERAND_INV:
  2202. case REC_OPERAND_SIZE:
  2203. case REC_EVEN_MOD:
  2204. case REC_INVALID_PAD:
  2205. case REC_USE_PCICA:
  2206. break;
  2207. case REC_BUSY:
  2208. case REC_NO_WORK:
  2209. case REC_EMPTY:
  2210. case REC_RETRY_DEV:
  2211. case REC_FATAL_ERROR:
  2212. return 0;
  2213. case REC_NO_RESPONSE:
  2214. break;
  2215. default:
  2216. PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
  2217. *rc_p, SHRT2LONG(index));
  2218. *rc_p = REC_NO_RESPONSE;
  2219. break;
  2220. }
  2221. return 1;
  2222. }
  2223. static inline void
  2224. z90crypt_schedule_reader_timer(void)
  2225. {
  2226. if (timer_pending(&reader_timer))
  2227. return;
  2228. if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
  2229. PRINTK("Timer pending while modifying reader timer\n");
  2230. }
  2231. static void
  2232. z90crypt_reader_task(unsigned long ptr)
  2233. {
  2234. int workavail, index, rc, buff_len;
  2235. unsigned char psmid[8];
  2236. unsigned char __user *resp_addr;
  2237. static unsigned char buff[1024];
  2238. /**
  2239. * we use workavail = 2 to ensure 2 passes with nothing dequeued before
  2240. * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
  2241. * loop, there is no work remaining on the queues.
  2242. */
  2243. resp_addr = 0;
  2244. workavail = 2;
  2245. buff_len = 0;
  2246. while (workavail) {
  2247. workavail--;
  2248. rc = 0;
  2249. spin_lock_irq(&queuespinlock);
  2250. memset(buff, 0x00, sizeof(buff));
  2251. /* Dequeue once from each device in round robin. */
  2252. for (index = 0; index < z90crypt.mask.st_count; index++) {
  2253. PDEBUG("About to receive.\n");
  2254. rc = receive_from_crypto_device(SHRT2LONG(index),
  2255. psmid,
  2256. &buff_len,
  2257. buff,
  2258. &resp_addr);
  2259. PDEBUG("Dequeued: rc = %d.\n", rc);
  2260. if (helper_receive_rc(index, &rc)) {
  2261. if (rc != REC_NO_RESPONSE) {
  2262. helper_send_work(index);
  2263. workavail = 2;
  2264. }
  2265. helper_handle_work_element(index, psmid, rc,
  2266. buff_len, buff,
  2267. resp_addr);
  2268. }
  2269. if (rc == REC_FATAL_ERROR)
  2270. PRINTKW("REC_FATAL_ERROR from device %d!\n",
  2271. SHRT2LONG(index));
  2272. }
  2273. spin_unlock_irq(&queuespinlock);
  2274. }
  2275. if (pendingq_count + requestq_count)
  2276. z90crypt_schedule_reader_timer();
  2277. }
  2278. static inline void
  2279. z90crypt_schedule_config_task(unsigned int expiration)
  2280. {
  2281. if (timer_pending(&config_timer))
  2282. return;
  2283. if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
  2284. PRINTK("Timer pending while modifying config timer\n");
  2285. }
  2286. static void
  2287. z90crypt_config_task(unsigned long ptr)
  2288. {
  2289. int rc;
  2290. PDEBUG("jiffies %ld\n", jiffies);
  2291. if ((rc = refresh_z90crypt(&z90crypt.cdx)))
  2292. PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
  2293. /* If return was fatal, don't bother reconfiguring */
  2294. if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
  2295. z90crypt_schedule_config_task(CONFIGTIME);
  2296. }
  2297. static inline void
  2298. z90crypt_schedule_cleanup_task(void)
  2299. {
  2300. if (timer_pending(&cleanup_timer))
  2301. return;
  2302. if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
  2303. PRINTK("Timer pending while modifying cleanup timer\n");
  2304. }
  2305. static inline void
  2306. helper_drain_queues(void)
  2307. {
  2308. struct work_element *pq_p;
  2309. struct list_head *lptr, *tptr;
  2310. list_for_each_safe(lptr, tptr, &pending_list) {
  2311. pq_p = list_entry(lptr, struct work_element, liste);
  2312. pq_p->retcode = -ENODEV;
  2313. pq_p->status[0] |= STAT_FAILED;
  2314. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2315. (struct caller *)pq_p->requestptr);
  2316. list_del_init(lptr);
  2317. pendingq_count--;
  2318. pq_p->audit[1] |= FP_NOTPENDING;
  2319. pq_p->audit[1] |= FP_AWAKENING;
  2320. atomic_set(&pq_p->alarmrung, 1);
  2321. wake_up(&pq_p->waitq);
  2322. }
  2323. list_for_each_safe(lptr, tptr, &request_list) {
  2324. pq_p = list_entry(lptr, struct work_element, liste);
  2325. pq_p->retcode = -ENODEV;
  2326. pq_p->status[0] |= STAT_FAILED;
  2327. list_del_init(lptr);
  2328. requestq_count--;
  2329. pq_p->audit[1] |= FP_REMREQUEST;
  2330. pq_p->audit[1] |= FP_AWAKENING;
  2331. atomic_set(&pq_p->alarmrung, 1);
  2332. wake_up(&pq_p->waitq);
  2333. }
  2334. }
  2335. static inline void
  2336. helper_timeout_requests(void)
  2337. {
  2338. struct work_element *pq_p;
  2339. struct list_head *lptr, *tptr;
  2340. long timelimit;
  2341. timelimit = jiffies - (CLEANUPTIME * HZ);
  2342. /* The list is in strict chronological order */
  2343. list_for_each_safe(lptr, tptr, &pending_list) {
  2344. pq_p = list_entry(lptr, struct work_element, liste);
  2345. if (pq_p->requestsent >= timelimit)
  2346. break;
  2347. PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2348. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2349. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2350. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2351. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2352. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2353. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2354. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2355. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2356. pq_p->retcode = -ETIMEOUT;
  2357. pq_p->status[0] |= STAT_FAILED;
  2358. /* get this off any caller queue it may be on */
  2359. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2360. (struct caller *) pq_p->requestptr);
  2361. list_del_init(lptr);
  2362. pendingq_count--;
  2363. pq_p->audit[1] |= FP_TIMEDOUT;
  2364. pq_p->audit[1] |= FP_NOTPENDING;
  2365. pq_p->audit[1] |= FP_AWAKENING;
  2366. atomic_set(&pq_p->alarmrung, 1);
  2367. wake_up(&pq_p->waitq);
  2368. }
  2369. /**
  2370. * If pending count is zero, items left on the request queue may
  2371. * never be processed.
  2372. */
  2373. if (pendingq_count <= 0) {
  2374. list_for_each_safe(lptr, tptr, &request_list) {
  2375. pq_p = list_entry(lptr, struct work_element, liste);
  2376. if (pq_p->requestsent >= timelimit)
  2377. break;
  2378. PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2379. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2380. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2381. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2382. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2383. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2384. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2385. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2386. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2387. pq_p->retcode = -ETIMEOUT;
  2388. pq_p->status[0] |= STAT_FAILED;
  2389. list_del_init(lptr);
  2390. requestq_count--;
  2391. pq_p->audit[1] |= FP_TIMEDOUT;
  2392. pq_p->audit[1] |= FP_REMREQUEST;
  2393. pq_p->audit[1] |= FP_AWAKENING;
  2394. atomic_set(&pq_p->alarmrung, 1);
  2395. wake_up(&pq_p->waitq);
  2396. }
  2397. }
  2398. }
  2399. static void
  2400. z90crypt_cleanup_task(unsigned long ptr)
  2401. {
  2402. PDEBUG("jiffies %ld\n", jiffies);
  2403. spin_lock_irq(&queuespinlock);
  2404. if (z90crypt.mask.st_count <= 0) // no devices!
  2405. helper_drain_queues();
  2406. else
  2407. helper_timeout_requests();
  2408. spin_unlock_irq(&queuespinlock);
  2409. z90crypt_schedule_cleanup_task();
  2410. }
  2411. static void
  2412. z90crypt_schedule_reader_task(unsigned long ptr)
  2413. {
  2414. tasklet_schedule(&reader_tasklet);
  2415. }
  2416. /**
  2417. * Lowlevel Functions:
  2418. *
  2419. * create_z90crypt: creates and initializes basic data structures
  2420. * refresh_z90crypt: re-initializes basic data structures
  2421. * find_crypto_devices: returns a count and mask of hardware status
  2422. * create_crypto_device: builds the descriptor for a device
  2423. * destroy_crypto_device: unallocates the descriptor for a device
  2424. * destroy_z90crypt: drains all work, unallocates structs
  2425. */
  2426. /**
  2427. * build the z90crypt root structure using the given domain index
  2428. */
  2429. static int
  2430. create_z90crypt(int *cdx_p)
  2431. {
  2432. struct hdware_block *hdware_blk_p;
  2433. memset(&z90crypt, 0x00, sizeof(struct z90crypt));
  2434. z90crypt.domain_established = 0;
  2435. z90crypt.len = sizeof(struct z90crypt);
  2436. z90crypt.max_count = Z90CRYPT_NUM_DEVS;
  2437. z90crypt.cdx = *cdx_p;
  2438. hdware_blk_p = (struct hdware_block *)
  2439. kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
  2440. if (!hdware_blk_p) {
  2441. PDEBUG("kmalloc for hardware block failed\n");
  2442. return ENOMEM;
  2443. }
  2444. memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
  2445. z90crypt.hdware_info = hdware_blk_p;
  2446. return 0;
  2447. }
  2448. static inline int
  2449. helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
  2450. {
  2451. enum hdstat hd_stat;
  2452. int q_depth, dev_type;
  2453. int indx, chkdom, numdomains;
  2454. q_depth = dev_type = numdomains = 0;
  2455. for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
  2456. for (indx = 0; indx < z90crypt.max_count; indx++) {
  2457. hd_stat = HD_NOT_THERE;
  2458. numdomains = 0;
  2459. for (chkdom = 0; chkdom <= 15; chkdom++) {
  2460. hd_stat = query_online(indx, chkdom, MAX_RESET,
  2461. &q_depth, &dev_type);
  2462. if (hd_stat == HD_TSQ_EXCEPTION) {
  2463. z90crypt.terminating = 1;
  2464. PRINTKC("exception taken!\n");
  2465. break;
  2466. }
  2467. if (hd_stat == HD_ONLINE) {
  2468. cdx_array[numdomains++] = chkdom;
  2469. if (*cdx_p == chkdom) {
  2470. *correct_cdx_found = 1;
  2471. break;
  2472. }
  2473. }
  2474. }
  2475. if ((*correct_cdx_found == 1) || (numdomains != 0))
  2476. break;
  2477. if (z90crypt.terminating)
  2478. break;
  2479. }
  2480. return numdomains;
  2481. }
  2482. static inline int
  2483. probe_crypto_domain(int *cdx_p)
  2484. {
  2485. int cdx_array[16];
  2486. char cdx_array_text[53], temp[5];
  2487. int correct_cdx_found, numdomains;
  2488. correct_cdx_found = 0;
  2489. numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
  2490. if (z90crypt.terminating)
  2491. return TSQ_FATAL_ERROR;
  2492. if (correct_cdx_found)
  2493. return 0;
  2494. if (numdomains == 0) {
  2495. PRINTKW("Unable to find crypto domain: No devices found\n");
  2496. return Z90C_NO_DEVICES;
  2497. }
  2498. if (numdomains == 1) {
  2499. if (*cdx_p == -1) {
  2500. *cdx_p = cdx_array[0];
  2501. return 0;
  2502. }
  2503. PRINTKW("incorrect domain: specified = %d, found = %d\n",
  2504. *cdx_p, cdx_array[0]);
  2505. return Z90C_INCORRECT_DOMAIN;
  2506. }
  2507. numdomains--;
  2508. sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
  2509. while (numdomains) {
  2510. numdomains--;
  2511. sprintf(temp, ", %d", cdx_array[numdomains]);
  2512. strcat(cdx_array_text, temp);
  2513. }
  2514. PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
  2515. *cdx_p, cdx_array_text);
  2516. return Z90C_AMBIGUOUS_DOMAIN;
  2517. }
  2518. static int
  2519. refresh_z90crypt(int *cdx_p)
  2520. {
  2521. int i, j, indx, rv;
  2522. static struct status local_mask;
  2523. struct device *devPtr;
  2524. unsigned char oldStat, newStat;
  2525. int return_unchanged;
  2526. if (z90crypt.len != sizeof(z90crypt))
  2527. return ENOTINIT;
  2528. if (z90crypt.terminating)
  2529. return TSQ_FATAL_ERROR;
  2530. rv = 0;
  2531. if (!z90crypt.hdware_info->hdware_mask.st_count &&
  2532. !z90crypt.domain_established) {
  2533. rv = probe_crypto_domain(cdx_p);
  2534. if (z90crypt.terminating)
  2535. return TSQ_FATAL_ERROR;
  2536. if (rv == Z90C_NO_DEVICES)
  2537. return 0; // try later
  2538. if (rv)
  2539. return rv;
  2540. z90crypt.cdx = *cdx_p;
  2541. z90crypt.domain_established = 1;
  2542. }
  2543. rv = find_crypto_devices(&local_mask);
  2544. if (rv) {
  2545. PRINTK("find crypto devices returned %d\n", rv);
  2546. return rv;
  2547. }
  2548. if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
  2549. sizeof(struct status))) {
  2550. return_unchanged = 1;
  2551. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
  2552. /**
  2553. * Check for disabled cards. If any device is marked
  2554. * disabled, destroy it.
  2555. */
  2556. for (j = 0;
  2557. j < z90crypt.hdware_info->type_mask[i].st_count;
  2558. j++) {
  2559. indx = z90crypt.hdware_info->type_x_addr[i].
  2560. device_index[j];
  2561. devPtr = z90crypt.device_p[indx];
  2562. if (devPtr && devPtr->disabled) {
  2563. local_mask.st_mask[indx] = HD_NOT_THERE;
  2564. return_unchanged = 0;
  2565. }
  2566. }
  2567. }
  2568. if (return_unchanged == 1)
  2569. return 0;
  2570. }
  2571. spin_lock_irq(&queuespinlock);
  2572. for (i = 0; i < z90crypt.max_count; i++) {
  2573. oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
  2574. newStat = local_mask.st_mask[i];
  2575. if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
  2576. destroy_crypto_device(i);
  2577. else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
  2578. rv = create_crypto_device(i);
  2579. if (rv >= REC_FATAL_ERROR)
  2580. return rv;
  2581. if (rv != 0) {
  2582. local_mask.st_mask[i] = HD_NOT_THERE;
  2583. local_mask.st_count--;
  2584. }
  2585. }
  2586. }
  2587. memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
  2588. sizeof(local_mask.st_mask));
  2589. z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
  2590. z90crypt.hdware_info->hdware_mask.disabled_count =
  2591. local_mask.disabled_count;
  2592. refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
  2593. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
  2594. refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
  2595. &(z90crypt.hdware_info->type_x_addr[i]));
  2596. spin_unlock_irq(&queuespinlock);
  2597. return rv;
  2598. }
  2599. static int
  2600. find_crypto_devices(struct status *deviceMask)
  2601. {
  2602. int i, q_depth, dev_type;
  2603. enum hdstat hd_stat;
  2604. deviceMask->st_count = 0;
  2605. deviceMask->disabled_count = 0;
  2606. deviceMask->user_disabled_count = 0;
  2607. for (i = 0; i < z90crypt.max_count; i++) {
  2608. hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
  2609. &dev_type);
  2610. if (hd_stat == HD_TSQ_EXCEPTION) {
  2611. z90crypt.terminating = 1;
  2612. PRINTKC("Exception during probe for crypto devices\n");
  2613. return TSQ_FATAL_ERROR;
  2614. }
  2615. deviceMask->st_mask[i] = hd_stat;
  2616. if (hd_stat == HD_ONLINE) {
  2617. PDEBUG("Got an online crypto!: %d\n", i);
  2618. PDEBUG("Got a queue depth of %d\n", q_depth);
  2619. PDEBUG("Got a device type of %d\n", dev_type);
  2620. if (q_depth <= 0)
  2621. return TSQ_FATAL_ERROR;
  2622. deviceMask->st_count++;
  2623. z90crypt.q_depth_array[i] = q_depth;
  2624. z90crypt.dev_type_array[i] = dev_type;
  2625. }
  2626. }
  2627. return 0;
  2628. }
  2629. static int
  2630. refresh_index_array(struct status *status_str, struct device_x *index_array)
  2631. {
  2632. int i, count;
  2633. enum devstat stat;
  2634. i = -1;
  2635. count = 0;
  2636. do {
  2637. stat = status_str->st_mask[++i];
  2638. if (stat == DEV_ONLINE)
  2639. index_array->device_index[count++] = i;
  2640. } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
  2641. return count;
  2642. }
  2643. static int
  2644. create_crypto_device(int index)
  2645. {
  2646. int rv, devstat, total_size;
  2647. struct device *dev_ptr;
  2648. struct status *type_str_p;
  2649. int deviceType;
  2650. dev_ptr = z90crypt.device_p[index];
  2651. if (!dev_ptr) {
  2652. total_size = sizeof(struct device) +
  2653. z90crypt.q_depth_array[index] * sizeof(int);
  2654. dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
  2655. if (!dev_ptr) {
  2656. PRINTK("kmalloc device %d failed\n", index);
  2657. return ENOMEM;
  2658. }
  2659. memset(dev_ptr, 0, total_size);
  2660. dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
  2661. if (!dev_ptr->dev_resp_p) {
  2662. kfree(dev_ptr);
  2663. PRINTK("kmalloc device %d rec buffer failed\n", index);
  2664. return ENOMEM;
  2665. }
  2666. dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
  2667. INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
  2668. }
  2669. devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
  2670. if (devstat == DEV_RSQ_EXCEPTION) {
  2671. PRINTK("exception during reset device %d\n", index);
  2672. kfree(dev_ptr->dev_resp_p);
  2673. kfree(dev_ptr);
  2674. return RSQ_FATAL_ERROR;
  2675. }
  2676. if (devstat == DEV_ONLINE) {
  2677. dev_ptr->dev_self_x = index;
  2678. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2679. if (dev_ptr->dev_type == NILDEV) {
  2680. rv = probe_device_type(dev_ptr);
  2681. if (rv) {
  2682. PRINTK("rv = %d from probe_device_type %d\n",
  2683. rv, index);
  2684. kfree(dev_ptr->dev_resp_p);
  2685. kfree(dev_ptr);
  2686. return rv;
  2687. }
  2688. }
  2689. if (dev_ptr->dev_type == PCIXCC_UNK) {
  2690. rv = probe_PCIXCC_type(dev_ptr);
  2691. if (rv) {
  2692. PRINTK("rv = %d from probe_PCIXCC_type %d\n",
  2693. rv, index);
  2694. kfree(dev_ptr->dev_resp_p);
  2695. kfree(dev_ptr);
  2696. return rv;
  2697. }
  2698. }
  2699. deviceType = dev_ptr->dev_type;
  2700. z90crypt.dev_type_array[index] = deviceType;
  2701. if (deviceType == PCICA)
  2702. z90crypt.hdware_info->device_type_array[index] = 1;
  2703. else if (deviceType == PCICC)
  2704. z90crypt.hdware_info->device_type_array[index] = 2;
  2705. else if (deviceType == PCIXCC_MCL2)
  2706. z90crypt.hdware_info->device_type_array[index] = 3;
  2707. else if (deviceType == PCIXCC_MCL3)
  2708. z90crypt.hdware_info->device_type_array[index] = 4;
  2709. else if (deviceType == CEX2C)
  2710. z90crypt.hdware_info->device_type_array[index] = 5;
  2711. else
  2712. z90crypt.hdware_info->device_type_array[index] = -1;
  2713. }
  2714. /**
  2715. * 'q_depth' returned by the hardware is one less than
  2716. * the actual depth
  2717. */
  2718. dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
  2719. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2720. dev_ptr->dev_stat = devstat;
  2721. dev_ptr->disabled = 0;
  2722. z90crypt.device_p[index] = dev_ptr;
  2723. if (devstat == DEV_ONLINE) {
  2724. if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
  2725. z90crypt.mask.st_mask[index] = DEV_ONLINE;
  2726. z90crypt.mask.st_count++;
  2727. }
  2728. deviceType = dev_ptr->dev_type;
  2729. type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
  2730. if (type_str_p->st_mask[index] != DEV_ONLINE) {
  2731. type_str_p->st_mask[index] = DEV_ONLINE;
  2732. type_str_p->st_count++;
  2733. }
  2734. }
  2735. return 0;
  2736. }
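/*
 * For reference, the single-digit codes stored in device_type_array
 * above match the legend printed by z90crypt_status() and the digits
 * accepted by z90crypt_status_write():
 *
 *	1 = PCICA, 2 = PCICC, 3 = PCIXCC (MCL2), 4 = PCIXCC (MCL3),
 *	5 = CEX2C, -1 = unrecognized device type.
 */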
  2737. static int
  2738. destroy_crypto_device(int index)
  2739. {
  2740. struct device *dev_ptr;
  2741. int t, disabledFlag;
  2742. dev_ptr = z90crypt.device_p[index];
  2743. /* remember device type; get rid of device struct */
  2744. if (dev_ptr) {
  2745. disabledFlag = dev_ptr->disabled;
  2746. t = dev_ptr->dev_type;
  2747. if (dev_ptr->dev_resp_p)
  2748. kfree(dev_ptr->dev_resp_p);
  2749. kfree(dev_ptr);
  2750. } else {
  2751. disabledFlag = 0;
  2752. t = -1;
  2753. }
  2754. z90crypt.device_p[index] = 0;
  2755. /* if the type is valid, remove the device from the type_mask */
  2756. if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
  2757. z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
  2758. z90crypt.hdware_info->type_mask[t].st_count--;
  2759. if (disabledFlag == 1)
  2760. z90crypt.hdware_info->type_mask[t].disabled_count--;
  2761. }
  2762. if (z90crypt.mask.st_mask[index] != DEV_GONE) {
  2763. z90crypt.mask.st_mask[index] = DEV_GONE;
  2764. z90crypt.mask.st_count--;
  2765. }
  2766. z90crypt.hdware_info->device_type_array[index] = 0;
  2767. return 0;
  2768. }
  2769. static void
  2770. destroy_z90crypt(void)
  2771. {
  2772. int i;
  2773. for (i = 0; i < z90crypt.max_count; i++)
  2774. if (z90crypt.device_p[i])
  2775. destroy_crypto_device(i);
  2776. if (z90crypt.hdware_info)
  2777. kfree((void *)z90crypt.hdware_info);
  2778. memset((void *)&z90crypt, 0, sizeof(z90crypt));
  2779. }
  2780. static unsigned char static_testmsg[384] = {
  2781. 0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
  2782. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
  2783. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
  2784. 0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
  2785. 0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2786. 0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2787. 0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
  2788. 0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2789. 0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2790. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2791. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2792. 0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2793. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
  2794. 0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
  2795. 0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
  2796. 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
  2797. 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
  2798. 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
  2799. 0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
  2800. 0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
  2801. 0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
  2802. 0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
  2803. 0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
  2804. 0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
  2805. };
  2806. static int
  2807. probe_device_type(struct device *devPtr)
  2808. {
  2809. int rv, dv, i, index, length;
  2810. unsigned char psmid[8];
  2811. static unsigned char loc_testmsg[sizeof(static_testmsg)];
  2812. index = devPtr->dev_self_x;
  2813. rv = 0;
  2814. do {
  2815. memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
  2816. length = sizeof(static_testmsg) - 24;
  2817. /* the -24 allows for the header */
  2818. dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
  2819. if (dv) {
  2820. PDEBUG("dv returned by send during probe: %d\n", dv);
  2821. if (dv == DEV_SEN_EXCEPTION) {
  2822. rv = SEN_FATAL_ERROR;
  2823. PRINTKC("exception in send to AP %d\n", index);
  2824. break;
  2825. }
			PDEBUG("return value from send_to_AP: %d\n", dv);
  2827. switch (dv) {
  2828. case DEV_GONE:
  2829. PDEBUG("dev %d not available\n", index);
  2830. rv = SEN_NOT_AVAIL;
  2831. break;
  2832. case DEV_ONLINE:
  2833. rv = 0;
  2834. break;
  2835. case DEV_EMPTY:
  2836. rv = SEN_NOT_AVAIL;
  2837. break;
  2838. case DEV_NO_WORK:
  2839. rv = SEN_FATAL_ERROR;
  2840. break;
  2841. case DEV_BAD_MESSAGE:
  2842. rv = SEN_USER_ERROR;
  2843. break;
  2844. case DEV_QUEUE_FULL:
  2845. rv = SEN_QUEUE_FULL;
  2846. break;
  2847. default:
  2848. PRINTK("unknown dv=%d for dev %d\n", dv, index);
  2849. rv = SEN_NOT_AVAIL;
  2850. break;
  2851. }
  2852. }
  2853. if (rv)
  2854. break;
  2855. for (i = 0; i < 6; i++) {
  2856. mdelay(300);
  2857. dv = receive_from_AP(index, z90crypt.cdx,
  2858. devPtr->dev_resp_l,
  2859. devPtr->dev_resp_p, psmid);
  2860. PDEBUG("dv returned by DQ = %d\n", dv);
  2861. if (dv == DEV_REC_EXCEPTION) {
  2862. rv = REC_FATAL_ERROR;
  2863. PRINTKC("exception in dequeue %d\n",
  2864. index);
  2865. break;
  2866. }
  2867. switch (dv) {
  2868. case DEV_ONLINE:
  2869. rv = 0;
  2870. break;
  2871. case DEV_EMPTY:
  2872. rv = REC_EMPTY;
  2873. break;
  2874. case DEV_NO_WORK:
  2875. rv = REC_NO_WORK;
  2876. break;
  2877. case DEV_BAD_MESSAGE:
  2878. case DEV_GONE:
  2879. default:
  2880. rv = REC_NO_RESPONSE;
  2881. break;
  2882. }
  2883. if ((rv != 0) && (rv != REC_NO_WORK))
  2884. break;
  2885. if (rv == 0)
  2886. break;
  2887. }
  2888. if (rv)
  2889. break;
  2890. rv = (devPtr->dev_resp_p[0] == 0x00) &&
  2891. (devPtr->dev_resp_p[1] == 0x86);
  2892. if (rv)
  2893. devPtr->dev_type = PCICC;
  2894. else
  2895. devPtr->dev_type = PCICA;
  2896. rv = 0;
  2897. } while (0);
  2898. /* In a general error case, the card is not marked online */
  2899. return rv;
  2900. }
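
/*
 * A minimal sketch of how a probe such as probe_device_type() might be
 * driven once a new queue index turns up during a device scan.  The
 * helper name (setup_probed_device) and its error handling are
 * illustrative only and are not part of this driver.
 *
 *	static int
 *	setup_probed_device(struct device *devPtr)
 *	{
 *		int rv;
 *
 *		rv = probe_device_type(devPtr);
 *		if (rv) {
 *			PDEBUG("probe of dev %d failed: %d\n",
 *			       devPtr->dev_self_x, rv);
 *			return rv;
 *		}
 *		PDEBUG("dev %d probed as type %d\n",
 *		       devPtr->dev_self_x, devPtr->dev_type);
 *		return 0;
 *	}
 */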
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
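
/*
 * probe_PCIXCC_type
 *
 * Sends the MCL3 test message to the AP queue at index
 * devPtr->dev_self_x, polls for the reply, and inspects the CPRBX
 * return/reason codes in the response to classify the card as
 * PCIXCC_MCL2 or PCIXCC_MCL3.
 */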
static int
probe_PCIXCC_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[548];
	struct CPRBX *cprbx_p;

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		/* Work on a local copy so the static template stays intact. */
		memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
		length = sizeof(MCL3_testmsg) - 0x0C;
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned = %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* Poll up to six times, 300 ms apart, for the test reply. */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* Keep polling only while no reply is available yet. */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}

		if (rv)
			break;

		/* The reply's CPRBX starts 48 bytes into the response buffer.
		 * Return code 8 with reason code 33 marks an MCL2 card;
		 * anything else is treated as MCL3. */
		cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
		if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
			devPtr->dev_type = PCIXCC_MCL2;
			PDEBUG("device %d is MCL2\n", index);
		} else {
			devPtr->dev_type = PCIXCC_MCL3;
			PDEBUG("device %d is MCL3\n", index);
		}
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}
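
/* Register the module entry and exit points. */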
module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);