z90main.c 90 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393
  1. /*
  2. * linux/drivers/s390/crypto/z90main.c
  3. *
  4. * z90crypt 1.3.2
  5. *
  6. * Copyright (C) 2001, 2004 IBM Corporation
  7. * Author(s): Robert Burroughs (burrough@us.ibm.com)
  8. * Eric Rossman (edrossma@us.ibm.com)
  9. *
  10. * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2, or (at your option)
  15. * any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  25. */
  26. #include <asm/uaccess.h> // copy_(from|to)_user
  27. #include <linux/compat.h>
  28. #include <linux/compiler.h>
  29. #include <linux/delay.h> // mdelay
  30. #include <linux/init.h>
  31. #include <linux/interrupt.h> // for tasklets
  32. #include <linux/miscdevice.h>
  33. #include <linux/module.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/proc_fs.h>
  36. #include <linux/syscalls.h>
  37. #include "z90crypt.h"
  38. #include "z90common.h"
  39. #define VERSION_Z90MAIN_C "$Revision: 1.62 $"
  40. static char z90main_version[] __initdata =
  41. "z90main.o (" VERSION_Z90MAIN_C "/"
  42. VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
  43. extern char z90hardware_version[];
  44. /**
  45. * Defaults that may be modified.
  46. */
  47. /**
  48. * You can specify a different minor at compile time.
  49. */
  50. #ifndef Z90CRYPT_MINOR
  51. #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
  52. #endif
  53. /**
  54. * You can specify a different domain at compile time or on the insmod
  55. * command line.
  56. */
  57. #ifndef DOMAIN_INDEX
  58. #define DOMAIN_INDEX -1
  59. #endif
  60. /**
  61. * This is the name under which the device is registered in /proc/modules.
  62. */
  63. #define REG_NAME "z90crypt"
  64. /**
  65. * Cleanup should run every CLEANUPTIME seconds and should clean up requests
  66. * older than CLEANUPTIME seconds in the past.
  67. */
  68. #ifndef CLEANUPTIME
  69. #define CLEANUPTIME 15
  70. #endif
  71. /**
  72. * Config should run every CONFIGTIME seconds
  73. */
  74. #ifndef CONFIGTIME
  75. #define CONFIGTIME 30
  76. #endif
  77. /**
  78. * The first execution of the config task should take place
  79. * immediately after initialization
  80. */
  81. #ifndef INITIAL_CONFIGTIME
  82. #define INITIAL_CONFIGTIME 1
  83. #endif
  84. /**
  85. * Reader should run every READERTIME milliseconds
  86. * With the 100Hz patch for s390, z90crypt can lock the system solid while
  87. * under heavy load. We'll try to avoid that.
  88. */
  89. #ifndef READERTIME
  90. #if HZ > 1000
  91. #define READERTIME 2
  92. #else
  93. #define READERTIME 10
  94. #endif
  95. #endif
  96. /**
  97. * turn long device array index into device pointer
  98. */
  99. #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
  100. /**
  101. * turn short device array index into long device array index
  102. */
  103. #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
  104. /**
  105. * turn short device array index into device pointer
  106. */
  107. #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
  108. /**
  109. * Status for a work-element
  110. */
  111. #define STAT_DEFAULT 0x00 // request has not been processed
  112. #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
  113. // else, device is determined each write
  114. #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
  115. // before being sent to the hardware.
  116. #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
  117. // 0x20 // UNUSED state
  118. #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
  119. #define STAT_NOWORK 0x00 // bits off: no work on any queue
  120. #define STAT_RDWRMASK 0x30 // mask for bits 5-4
  121. /**
  122. * Macros to check the status RDWRMASK
  123. */
  124. #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
  125. #define SET_RDWRMASK(statbyte, newval) \
  126. {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
  127. /**
  128. * Audit Trail. Progress of a Work element
  129. * audit[0]: Unless noted otherwise, these bits are all set by the process
  130. */
  131. #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
  132. #define FP_BUFFREQ 0x40 // Low Level buffer requested
  133. #define FP_BUFFGOT 0x20 // Low Level buffer obtained
  134. #define FP_SENT 0x10 // Work element sent to a crypto device
  135. // (may be set by process or by reader task)
  136. #define FP_PENDING 0x08 // Work element placed on pending queue
  137. // (may be set by process or by reader task)
  138. #define FP_REQUEST 0x04 // Work element placed on request queue
  139. #define FP_ASLEEP 0x02 // Work element about to sleep
  140. #define FP_AWAKE 0x01 // Work element has been awakened
  141. /**
  142. * audit[1]: These bits are set by the reader task and/or the cleanup task
  143. */
  144. #define FP_NOTPENDING 0x80 // Work element removed from pending queue
  145. #define FP_AWAKENING 0x40 // Caller about to be awakened
  146. #define FP_TIMEDOUT 0x20 // Caller timed out
  147. #define FP_RESPSIZESET 0x10 // Response size copied to work element
  148. #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
  149. #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
  150. #define FP_REMREQUEST 0x02 // Work element removed from request queue
  151. #define FP_SIGNALED 0x01 // Work element was awakened by a signal
  152. /**
  153. * audit[2]: unused
  154. */
  155. /**
  156. * state of the file handle in private_data.status
  157. */
  158. #define STAT_OPEN 0
  159. #define STAT_CLOSED 1
  160. /**
  161. * PID() expands to the process ID of the current process
  162. */
  163. #define PID() (current->pid)
  164. /**
  165. * Selected Constants. The number of APs and the number of devices
  166. */
  167. #ifndef Z90CRYPT_NUM_APS
  168. #define Z90CRYPT_NUM_APS 64
  169. #endif
  170. #ifndef Z90CRYPT_NUM_DEVS
  171. #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
  172. #endif
  173. /**
  174. * Buffer size for receiving responses. The maximum Response Size
  175. * is actually the maximum request size, since in an error condition
  176. * the request itself may be returned unchanged.
  177. */
  178. #define MAX_RESPONSE_SIZE 0x0000077C
  179. /**
  180. * A count and status-byte mask
  181. */
  182. struct status {
  183. int st_count; // # of enabled devices
  184. int disabled_count; // # of disabled devices
  185. int user_disabled_count; // # of devices disabled via proc fs
  186. unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
  187. };
  188. /**
  189. * The array of device indexes is a mechanism for fast indexing into
  190. * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
  191. * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
  192. * z90CDeviceIndex[2] is 47.
  193. */
  194. struct device_x {
  195. int device_index[Z90CRYPT_NUM_DEVS];
  196. };
  197. /**
  198. * All devices are arranged in a single array: 64 APs
  199. */
  200. struct device {
  201. int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
  202. // PCIXCC_MCL3, CEX2C, CEX2A
  203. enum devstat dev_stat; // current device status
  204. int dev_self_x; // Index in array
  205. int disabled; // Set when device is in error
  206. int user_disabled; // Set when device is disabled by user
  207. int dev_q_depth; // q depth
  208. unsigned char * dev_resp_p; // Response buffer address
  209. int dev_resp_l; // Response Buffer length
  210. int dev_caller_count; // Number of callers
  211. int dev_total_req_cnt; // # requests for device since load
  212. struct list_head dev_caller_list; // List of callers
  213. };
  214. /**
  215. * There's a struct status and a struct device_x for each device type.
  216. */
  217. struct hdware_block {
  218. struct status hdware_mask;
  219. struct status type_mask[Z90CRYPT_NUM_TYPES];
  220. struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
  221. unsigned char device_type_array[Z90CRYPT_NUM_APS];
  222. };
  223. /**
  224. * z90crypt is the topmost data structure in the hierarchy.
  225. */
  226. struct z90crypt {
  227. int max_count; // Nr of possible crypto devices
  228. struct status mask;
  229. int q_depth_array[Z90CRYPT_NUM_DEVS];
  230. int dev_type_array[Z90CRYPT_NUM_DEVS];
  231. struct device_x overall_device_x; // array device indexes
  232. struct device * device_p[Z90CRYPT_NUM_DEVS];
  233. int terminating;
  234. int domain_established;// TRUE: domain has been found
  235. int cdx; // Crypto Domain Index
  236. int len; // Length of this data structure
  237. struct hdware_block *hdware_info;
  238. };
  239. /**
  240. * An array of these structures is pointed to from dev_caller
  241. * The length of the array depends on the device type. For APs,
  242. * there are 8.
  243. *
  244. * The caller buffer is allocated to the user at OPEN. At WRITE,
  245. * it contains the request; at READ, the response. The function
  246. * send_to_crypto_device converts the request to device-dependent
  247. * form and use the caller's OPEN-allocated buffer for the response.
  248. *
  249. * For the contents of caller_dev_dep_req and caller_dev_dep_req_p
  250. * because that points to it, see the discussion in z90hardware.c.
  251. * Search for "extended request message block".
  252. */
  253. struct caller {
  254. int caller_buf_l; // length of original request
  255. unsigned char * caller_buf_p; // Original request on WRITE
  256. int caller_dev_dep_req_l; // len device dependent request
  257. unsigned char * caller_dev_dep_req_p; // Device dependent form
  258. unsigned char caller_id[8]; // caller-supplied message id
  259. struct list_head caller_liste;
  260. unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
  261. };
  262. /**
  263. * Function prototypes from z90hardware.c
  264. */
  265. enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
  266. int *dev_type);
  267. enum devstat reset_device(int deviceNr, int cdx, int resetNr);
  268. enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
  269. enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
  270. unsigned char *resp, unsigned char *psmid);
  271. int convert_request(unsigned char *buffer, int func, unsigned short function,
  272. int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
  273. int convert_response(unsigned char *response, unsigned char *buffer,
  274. int *respbufflen_p, unsigned char *resp_buff);
  275. /**
  276. * Low level function prototypes
  277. */
  278. static int create_z90crypt(int *cdx_p);
  279. static int refresh_z90crypt(int *cdx_p);
  280. static int find_crypto_devices(struct status *deviceMask);
  281. static int create_crypto_device(int index);
  282. static int destroy_crypto_device(int index);
  283. static void destroy_z90crypt(void);
  284. static int refresh_index_array(struct status *status_str,
  285. struct device_x *index_array);
  286. static int probe_device_type(struct device *devPtr);
  287. static int probe_PCIXCC_type(struct device *devPtr);
  288. /**
  289. * proc fs definitions
  290. */
  291. static struct proc_dir_entry *z90crypt_entry;
  292. /**
  293. * data structures
  294. */
  295. /**
  296. * work_element.opener points back to this structure
  297. */
  298. struct priv_data {
  299. pid_t opener_pid;
  300. unsigned char status; // 0: open 1: closed
  301. };
  302. /**
  303. * A work element is allocated for each request
  304. */
  305. struct work_element {
  306. struct priv_data *priv_data;
  307. pid_t pid;
  308. int devindex; // index of device processing this w_e
  309. // (If request did not specify device,
  310. // -1 until placed onto a queue)
  311. int devtype;
  312. struct list_head liste; // used for requestq and pendingq
  313. char buffer[128]; // local copy of user request
  314. int buff_size; // size of the buffer for the request
  315. char resp_buff[RESPBUFFSIZE];
  316. int resp_buff_size;
  317. char __user * resp_addr; // address of response in user space
  318. unsigned int funccode; // function code of request
  319. wait_queue_head_t waitq;
  320. unsigned long requestsent; // time at which the request was sent
  321. atomic_t alarmrung; // wake-up signal
  322. unsigned char caller_id[8]; // pid + counter, for this w_e
  323. unsigned char status[1]; // bits to mark status of the request
  324. unsigned char audit[3]; // record of work element's progress
  325. unsigned char * requestptr; // address of request buffer
  326. int retcode; // return code of request
  327. };
  328. /**
  329. * High level function prototypes
  330. */
  331. static int z90crypt_open(struct inode *, struct file *);
  332. static int z90crypt_release(struct inode *, struct file *);
  333. static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
  334. static ssize_t z90crypt_write(struct file *, const char __user *,
  335. size_t, loff_t *);
  336. static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
  337. static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
  338. static void z90crypt_reader_task(unsigned long);
  339. static void z90crypt_schedule_reader_task(unsigned long);
  340. static void z90crypt_config_task(unsigned long);
  341. static void z90crypt_cleanup_task(unsigned long);
  342. static int z90crypt_status(char *, char **, off_t, int, int *, void *);
  343. static int z90crypt_status_write(struct file *, const char __user *,
  344. unsigned long, void *);
  345. /**
  346. * Storage allocated at initialization and used throughout the life of
  347. * this insmod
  348. */
  349. static int domain = DOMAIN_INDEX;
  350. static struct z90crypt z90crypt;
  351. static int quiesce_z90crypt;
  352. static spinlock_t queuespinlock;
  353. static struct list_head request_list;
  354. static int requestq_count;
  355. static struct list_head pending_list;
  356. static int pendingq_count;
  357. static struct tasklet_struct reader_tasklet;
  358. static struct timer_list reader_timer;
  359. static struct timer_list config_timer;
  360. static struct timer_list cleanup_timer;
  361. static atomic_t total_open;
  362. static atomic_t z90crypt_step;
  363. static struct file_operations z90crypt_fops = {
  364. .owner = THIS_MODULE,
  365. .read = z90crypt_read,
  366. .write = z90crypt_write,
  367. .unlocked_ioctl = z90crypt_unlocked_ioctl,
  368. #ifdef CONFIG_COMPAT
  369. .compat_ioctl = z90crypt_compat_ioctl,
  370. #endif
  371. .open = z90crypt_open,
  372. .release = z90crypt_release
  373. };
  374. static struct miscdevice z90crypt_misc_device = {
  375. .minor = Z90CRYPT_MINOR,
  376. .name = DEV_NAME,
  377. .fops = &z90crypt_fops,
  378. .devfs_name = DEV_NAME
  379. };
  380. /**
  381. * Documentation values.
  382. */
  383. MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
  384. "and Jochen Roehrig");
  385. MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
  386. "Copyright 2001, 2005 IBM Corporation");
  387. MODULE_LICENSE("GPL");
  388. module_param(domain, int, 0);
  389. MODULE_PARM_DESC(domain, "domain index for device");
  390. #ifdef CONFIG_COMPAT
  391. /**
  392. * ioctl32 conversion routines
  393. */
  394. struct ica_rsa_modexpo_32 { // For 32-bit callers
  395. compat_uptr_t inputdata;
  396. unsigned int inputdatalength;
  397. compat_uptr_t outputdata;
  398. unsigned int outputdatalength;
  399. compat_uptr_t b_key;
  400. compat_uptr_t n_modulus;
  401. };
  402. static long
  403. trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
  404. {
  405. struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
  406. struct ica_rsa_modexpo_32 mex32k;
  407. struct ica_rsa_modexpo __user *mex64;
  408. long ret = 0;
  409. unsigned int i;
  410. if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  411. return -EFAULT;
  412. mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
  413. if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
  414. return -EFAULT;
  415. if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  416. return -EFAULT;
  417. if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
  418. __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
  419. __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
  420. __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
  421. __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
  422. __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
  423. return -EFAULT;
  424. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
  425. if (!ret)
  426. if (__get_user(i, &mex64->outputdatalength) ||
  427. __put_user(i, &mex32u->outputdatalength))
  428. ret = -EFAULT;
  429. return ret;
  430. }
  431. struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
  432. compat_uptr_t inputdata;
  433. unsigned int inputdatalength;
  434. compat_uptr_t outputdata;
  435. unsigned int outputdatalength;
  436. compat_uptr_t bp_key;
  437. compat_uptr_t bq_key;
  438. compat_uptr_t np_prime;
  439. compat_uptr_t nq_prime;
  440. compat_uptr_t u_mult_inv;
  441. };
  442. static long
  443. trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
  444. {
  445. struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
  446. struct ica_rsa_modexpo_crt_32 crt32k;
  447. struct ica_rsa_modexpo_crt __user *crt64;
  448. long ret = 0;
  449. unsigned int i;
  450. if (!access_ok(VERIFY_WRITE, crt32u,
  451. sizeof(struct ica_rsa_modexpo_crt_32)))
  452. return -EFAULT;
  453. crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
  454. if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
  455. return -EFAULT;
  456. if (copy_from_user(&crt32k, crt32u,
  457. sizeof(struct ica_rsa_modexpo_crt_32)))
  458. return -EFAULT;
  459. if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
  460. __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
  461. __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
  462. __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
  463. __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
  464. __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
  465. __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
  466. __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
  467. __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
  468. return -EFAULT;
  469. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
  470. if (!ret)
  471. if (__get_user(i, &crt64->outputdatalength) ||
  472. __put_user(i, &crt32u->outputdatalength))
  473. ret = -EFAULT;
  474. return ret;
  475. }
  476. static long
  477. z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  478. {
  479. switch (cmd) {
  480. case ICAZ90STATUS:
  481. case Z90QUIESCE:
  482. case Z90STAT_TOTALCOUNT:
  483. case Z90STAT_PCICACOUNT:
  484. case Z90STAT_PCICCCOUNT:
  485. case Z90STAT_PCIXCCCOUNT:
  486. case Z90STAT_PCIXCCMCL2COUNT:
  487. case Z90STAT_PCIXCCMCL3COUNT:
  488. case Z90STAT_CEX2CCOUNT:
  489. case Z90STAT_REQUESTQ_COUNT:
  490. case Z90STAT_PENDINGQ_COUNT:
  491. case Z90STAT_TOTALOPEN_COUNT:
  492. case Z90STAT_DOMAIN_INDEX:
  493. case Z90STAT_STATUS_MASK:
  494. case Z90STAT_QDEPTH_MASK:
  495. case Z90STAT_PERDEV_REQCNT:
  496. return z90crypt_unlocked_ioctl(filp, cmd, arg);
  497. case ICARSAMODEXPO:
  498. return trans_modexpo32(filp, cmd, arg);
  499. case ICARSACRT:
  500. return trans_modexpo_crt32(filp, cmd, arg);
  501. default:
  502. return -ENOIOCTLCMD;
  503. }
  504. }
  505. #endif
  506. /**
  507. * The module initialization code.
  508. */
  509. static int __init
  510. z90crypt_init_module(void)
  511. {
  512. int result, nresult;
  513. struct proc_dir_entry *entry;
  514. PDEBUG("PID %d\n", PID());
  515. if ((domain < -1) || (domain > 15)) {
  516. PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
  517. return -EINVAL;
  518. }
  519. /* Register as misc device with given minor (or get a dynamic one). */
  520. result = misc_register(&z90crypt_misc_device);
  521. if (result < 0) {
  522. PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
  523. z90crypt_misc_device.minor, result);
  524. return result;
  525. }
  526. PDEBUG("Registered " DEV_NAME " with result %d\n", result);
  527. result = create_z90crypt(&domain);
  528. if (result != 0) {
  529. PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
  530. domain, result);
  531. result = -ENOMEM;
  532. goto init_module_cleanup;
  533. }
  534. if (result == 0) {
  535. PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
  536. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
  537. __DATE__, __TIME__);
  538. PRINTKN("%s\n", z90main_version);
  539. PRINTKN("%s\n", z90hardware_version);
  540. PDEBUG("create_z90crypt (domain index %d) successful.\n",
  541. domain);
  542. } else
  543. PRINTK("No devices at startup\n");
  544. /* Initialize globals. */
  545. spin_lock_init(&queuespinlock);
  546. INIT_LIST_HEAD(&pending_list);
  547. pendingq_count = 0;
  548. INIT_LIST_HEAD(&request_list);
  549. requestq_count = 0;
  550. quiesce_z90crypt = 0;
  551. atomic_set(&total_open, 0);
  552. atomic_set(&z90crypt_step, 0);
  553. /* Set up the cleanup task. */
  554. init_timer(&cleanup_timer);
  555. cleanup_timer.function = z90crypt_cleanup_task;
  556. cleanup_timer.data = 0;
  557. cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
  558. add_timer(&cleanup_timer);
  559. /* Set up the proc file system */
  560. entry = create_proc_entry("driver/z90crypt", 0644, 0);
  561. if (entry) {
  562. entry->nlink = 1;
  563. entry->data = 0;
  564. entry->read_proc = z90crypt_status;
  565. entry->write_proc = z90crypt_status_write;
  566. }
  567. else
  568. PRINTK("Couldn't create z90crypt proc entry\n");
  569. z90crypt_entry = entry;
  570. /* Set up the configuration task. */
  571. init_timer(&config_timer);
  572. config_timer.function = z90crypt_config_task;
  573. config_timer.data = 0;
  574. config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
  575. add_timer(&config_timer);
  576. /* Set up the reader task */
  577. tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
  578. init_timer(&reader_timer);
  579. reader_timer.function = z90crypt_schedule_reader_task;
  580. reader_timer.data = 0;
  581. reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
  582. add_timer(&reader_timer);
  583. return 0; // success
  584. init_module_cleanup:
  585. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  586. PRINTK("misc_deregister failed with %d.\n", nresult);
  587. else
  588. PDEBUG("misc_deregister successful.\n");
  589. return result; // failure
  590. }
  591. /**
  592. * The module termination code
  593. */
  594. static void __exit
  595. z90crypt_cleanup_module(void)
  596. {
  597. int nresult;
  598. PDEBUG("PID %d\n", PID());
  599. remove_proc_entry("driver/z90crypt", 0);
  600. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  601. PRINTK("misc_deregister failed with %d.\n", nresult);
  602. else
  603. PDEBUG("misc_deregister successful.\n");
  604. /* Remove the tasks */
  605. tasklet_kill(&reader_tasklet);
  606. del_timer(&reader_timer);
  607. del_timer(&config_timer);
  608. del_timer(&cleanup_timer);
  609. destroy_z90crypt();
  610. PRINTKN("Unloaded.\n");
  611. }
  612. /**
  613. * Functions running under a process id
  614. *
  615. * The I/O functions:
  616. * z90crypt_open
  617. * z90crypt_release
  618. * z90crypt_read
  619. * z90crypt_write
  620. * z90crypt_unlocked_ioctl
  621. * z90crypt_status
  622. * z90crypt_status_write
  623. * disable_card
  624. * enable_card
  625. *
  626. * Helper functions:
  627. * z90crypt_rsa
  628. * z90crypt_prepare
  629. * z90crypt_send
  630. * z90crypt_process_results
  631. *
  632. */
  633. static int
  634. z90crypt_open(struct inode *inode, struct file *filp)
  635. {
  636. struct priv_data *private_data_p;
  637. if (quiesce_z90crypt)
  638. return -EQUIESCE;
  639. private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
  640. if (!private_data_p) {
  641. PRINTK("Memory allocate failed\n");
  642. return -ENOMEM;
  643. }
  644. memset((void *)private_data_p, 0, sizeof(struct priv_data));
  645. private_data_p->status = STAT_OPEN;
  646. private_data_p->opener_pid = PID();
  647. filp->private_data = private_data_p;
  648. atomic_inc(&total_open);
  649. return 0;
  650. }
  651. static int
  652. z90crypt_release(struct inode *inode, struct file *filp)
  653. {
  654. struct priv_data *private_data_p = filp->private_data;
  655. PDEBUG("PID %d (filp %p)\n", PID(), filp);
  656. private_data_p->status = STAT_CLOSED;
  657. memset(private_data_p, 0, sizeof(struct priv_data));
  658. kfree(private_data_p);
  659. atomic_dec(&total_open);
  660. return 0;
  661. }
  662. /*
  663. * there are two read functions, of which compile options will choose one
  664. * without USE_GET_RANDOM_BYTES
  665. * => read() always returns -EPERM;
  666. * otherwise
  667. * => read() uses get_random_bytes() kernel function
  668. */
  669. #ifndef USE_GET_RANDOM_BYTES
  670. /**
  671. * z90crypt_read will not be supported beyond z90crypt 1.3.1
  672. */
  673. static ssize_t
  674. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  675. {
  676. PDEBUG("filp %p (PID %d)\n", filp, PID());
  677. return -EPERM;
  678. }
  679. #else // we want to use get_random_bytes
  680. /**
  681. * read() just returns a string of random bytes. Since we have no way
  682. * to generate these cryptographically, we just execute get_random_bytes
  683. * for the length specified.
  684. */
  685. #include <linux/random.h>
  686. static ssize_t
  687. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  688. {
  689. unsigned char *temp_buff;
  690. PDEBUG("filp %p (PID %d)\n", filp, PID());
  691. if (quiesce_z90crypt)
  692. return -EQUIESCE;
  693. if (count < 0) {
  694. PRINTK("Requested random byte count negative: %ld\n", count);
  695. return -EINVAL;
  696. }
  697. if (count > RESPBUFFSIZE) {
  698. PDEBUG("count[%d] > RESPBUFFSIZE", count);
  699. return -EINVAL;
  700. }
  701. if (count == 0)
  702. return 0;
  703. temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
  704. if (!temp_buff) {
  705. PRINTK("Memory allocate failed\n");
  706. return -ENOMEM;
  707. }
  708. get_random_bytes(temp_buff, count);
  709. if (copy_to_user(buf, temp_buff, count) != 0) {
  710. kfree(temp_buff);
  711. return -EFAULT;
  712. }
  713. kfree(temp_buff);
  714. return count;
  715. }
  716. #endif
  717. /**
  718. * Write is is not allowed
  719. */
  720. static ssize_t
  721. z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
  722. {
  723. PDEBUG("filp %p (PID %d)\n", filp, PID());
  724. return -EPERM;
  725. }
  726. /**
  727. * New status functions
  728. */
  729. static inline int
  730. get_status_totalcount(void)
  731. {
  732. return z90crypt.hdware_info->hdware_mask.st_count;
  733. }
  734. static inline int
  735. get_status_PCICAcount(void)
  736. {
  737. return z90crypt.hdware_info->type_mask[PCICA].st_count;
  738. }
  739. static inline int
  740. get_status_PCICCcount(void)
  741. {
  742. return z90crypt.hdware_info->type_mask[PCICC].st_count;
  743. }
  744. static inline int
  745. get_status_PCIXCCcount(void)
  746. {
  747. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
  748. z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  749. }
  750. static inline int
  751. get_status_PCIXCCMCL2count(void)
  752. {
  753. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
  754. }
  755. static inline int
  756. get_status_PCIXCCMCL3count(void)
  757. {
  758. return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  759. }
  760. static inline int
  761. get_status_CEX2Ccount(void)
  762. {
  763. return z90crypt.hdware_info->type_mask[CEX2C].st_count;
  764. }
  765. static inline int
  766. get_status_CEX2Acount(void)
  767. {
  768. return z90crypt.hdware_info->type_mask[CEX2A].st_count;
  769. }
  770. static inline int
  771. get_status_requestq_count(void)
  772. {
  773. return requestq_count;
  774. }
  775. static inline int
  776. get_status_pendingq_count(void)
  777. {
  778. return pendingq_count;
  779. }
  780. static inline int
  781. get_status_totalopen_count(void)
  782. {
  783. return atomic_read(&total_open);
  784. }
  785. static inline int
  786. get_status_domain_index(void)
  787. {
  788. return z90crypt.cdx;
  789. }
  790. static inline unsigned char *
  791. get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
  792. {
  793. int i, ix;
  794. memcpy(status, z90crypt.hdware_info->device_type_array,
  795. Z90CRYPT_NUM_APS);
  796. for (i = 0; i < get_status_totalcount(); i++) {
  797. ix = SHRT2LONG(i);
  798. if (LONG2DEVPTR(ix)->user_disabled)
  799. status[ix] = 0x0d;
  800. }
  801. return status;
  802. }
  803. static inline unsigned char *
  804. get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
  805. {
  806. int i, ix;
  807. memset(qdepth, 0, Z90CRYPT_NUM_APS);
  808. for (i = 0; i < get_status_totalcount(); i++) {
  809. ix = SHRT2LONG(i);
  810. qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
  811. }
  812. return qdepth;
  813. }
  814. static inline unsigned int *
  815. get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
  816. {
  817. int i, ix;
  818. memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
  819. for (i = 0; i < get_status_totalcount(); i++) {
  820. ix = SHRT2LONG(i);
  821. reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
  822. }
  823. return reqcnt;
  824. }
  825. static inline void
  826. init_work_element(struct work_element *we_p,
  827. struct priv_data *priv_data, pid_t pid)
  828. {
  829. int step;
  830. we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
  831. /* Come up with a unique id for this caller. */
  832. step = atomic_inc_return(&z90crypt_step);
  833. memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
  834. memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
  835. we_p->pid = pid;
  836. we_p->priv_data = priv_data;
  837. we_p->status[0] = STAT_DEFAULT;
  838. we_p->audit[0] = 0x00;
  839. we_p->audit[1] = 0x00;
  840. we_p->audit[2] = 0x00;
  841. we_p->resp_buff_size = 0;
  842. we_p->retcode = 0;
  843. we_p->devindex = -1;
  844. we_p->devtype = -1;
  845. atomic_set(&we_p->alarmrung, 0);
  846. init_waitqueue_head(&we_p->waitq);
  847. INIT_LIST_HEAD(&(we_p->liste));
  848. }
  849. static inline int
  850. allocate_work_element(struct work_element **we_pp,
  851. struct priv_data *priv_data_p, pid_t pid)
  852. {
  853. struct work_element *we_p;
  854. we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
  855. if (!we_p)
  856. return -ENOMEM;
  857. init_work_element(we_p, priv_data_p, pid);
  858. *we_pp = we_p;
  859. return 0;
  860. }
  861. static inline void
  862. remove_device(struct device *device_p)
  863. {
  864. if (!device_p || (device_p->disabled != 0))
  865. return;
  866. device_p->disabled = 1;
  867. z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
  868. z90crypt.hdware_info->hdware_mask.disabled_count++;
  869. }
  870. /**
  871. * Bitlength limits for each card
  872. *
  873. * There are new MCLs which allow more bitlengths. See the table for details.
  874. * The MCL must be applied and the newer bitlengths enabled for these to work.
  875. *
  876. * Card Type Old limit New limit
  877. * PCICA ??-2048 same (the lower limit is less than 128 bit...)
  878. * PCICC 512-1024 512-2048
  879. * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
  880. * PCIXCC_MCL3 ----- 128-2048
  881. * CEX2C 512-2048 128-2048
  882. *
  883. * ext_bitlens (extended bitlengths) is a global, since you should not apply an
  884. * MCL to just one card in a machine. We assume, at first, that all cards have
  885. * these capabilities.
  886. */
  887. int ext_bitlens = 1; // This is global
  888. #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
  889. #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
  890. #define PCICC_MIN_MOD_SIZE 64 // 512 bits
  891. #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
  892. #define MAX_MOD_SIZE 256 // 2048 bits
  893. static inline int
  894. select_device_type(int *dev_type_p, int bytelength)
  895. {
  896. static int count = 0;
  897. int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
  898. index_to_use;
  899. struct status *stat;
  900. if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
  901. (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
  902. (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
  903. (*dev_type_p != ANYDEV))
  904. return -1;
  905. if (*dev_type_p != ANYDEV) {
  906. stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
  907. if (stat->st_count >
  908. (stat->disabled_count + stat->user_disabled_count))
  909. return 0;
  910. return -1;
  911. }
  912. /**
  913. * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
  914. * speed.
  915. *
  916. * PCICA and CEX2A do NOT co-exist, so it would be either one or the
  917. * other present.
  918. */
  919. stat = &z90crypt.hdware_info->type_mask[PCICA];
  920. PCICA_avail = stat->st_count -
  921. (stat->disabled_count + stat->user_disabled_count);
  922. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
  923. PCIXCC_MCL3_avail = stat->st_count -
  924. (stat->disabled_count + stat->user_disabled_count);
  925. stat = &z90crypt.hdware_info->type_mask[CEX2C];
  926. CEX2C_avail = stat->st_count -
  927. (stat->disabled_count + stat->user_disabled_count);
  928. stat = &z90crypt.hdware_info->type_mask[CEX2A];
  929. CEX2A_avail = stat->st_count -
  930. (stat->disabled_count + stat->user_disabled_count);
  931. if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
  932. /**
  933. * bitlength is a factor, PCICA or CEX2A are the most capable,
  934. * even with the new MCL for PCIXCC.
  935. */
  936. if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
  937. (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
  938. if (PCICA_avail) {
  939. *dev_type_p = PCICA;
  940. return 0;
  941. }
  942. if (CEX2A_avail) {
  943. *dev_type_p = CEX2A;
  944. return 0;
  945. }
  946. return -1;
  947. }
  948. index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
  949. CEX2C_avail + CEX2A_avail);
  950. if (index_to_use < PCICA_avail)
  951. *dev_type_p = PCICA;
  952. else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
  953. *dev_type_p = PCIXCC_MCL3;
  954. else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
  955. CEX2C_avail))
  956. *dev_type_p = CEX2C;
  957. else
  958. *dev_type_p = CEX2A;
  959. count++;
  960. return 0;
  961. }
  962. /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
  963. if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
  964. return -1;
  965. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
  966. if (stat->st_count >
  967. (stat->disabled_count + stat->user_disabled_count)) {
  968. *dev_type_p = PCIXCC_MCL2;
  969. return 0;
  970. }
  971. /**
  972. * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
  973. * (if we don't have the MCL applied and the newer bitlengths enabled)
  974. * cannot go to a PCICC
  975. */
  976. if ((bytelength < PCICC_MIN_MOD_SIZE) ||
  977. (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
  978. return -1;
  979. }
  980. stat = &z90crypt.hdware_info->type_mask[PCICC];
  981. if (stat->st_count >
  982. (stat->disabled_count + stat->user_disabled_count)) {
  983. *dev_type_p = PCICC;
  984. return 0;
  985. }
  986. return -1;
  987. }
  988. /**
  989. * Try the selected number, then the selected type (can be ANYDEV)
  990. */
  991. static inline int
  992. select_device(int *dev_type_p, int *device_nr_p, int bytelength)
  993. {
  994. int i, indx, devTp, low_count, low_indx;
  995. struct device_x *index_p;
  996. struct device *dev_ptr;
  997. PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
  998. if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
  999. PDEBUG("trying index = %d\n", *device_nr_p);
  1000. dev_ptr = z90crypt.device_p[*device_nr_p];
  1001. if (dev_ptr &&
  1002. (dev_ptr->dev_stat != DEV_GONE) &&
  1003. (dev_ptr->disabled == 0) &&
  1004. (dev_ptr->user_disabled == 0)) {
  1005. PDEBUG("selected by number, index = %d\n",
  1006. *device_nr_p);
  1007. *dev_type_p = dev_ptr->dev_type;
  1008. return *device_nr_p;
  1009. }
  1010. }
  1011. *device_nr_p = -1;
  1012. PDEBUG("trying type = %d\n", *dev_type_p);
  1013. devTp = *dev_type_p;
  1014. if (select_device_type(&devTp, bytelength) == -1) {
  1015. PDEBUG("failed to select by type\n");
  1016. return -1;
  1017. }
  1018. PDEBUG("selected type = %d\n", devTp);
  1019. index_p = &z90crypt.hdware_info->type_x_addr[devTp];
  1020. low_count = 0x0000FFFF;
  1021. low_indx = -1;
  1022. for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
  1023. indx = index_p->device_index[i];
  1024. dev_ptr = z90crypt.device_p[indx];
  1025. if (dev_ptr &&
  1026. (dev_ptr->dev_stat != DEV_GONE) &&
  1027. (dev_ptr->disabled == 0) &&
  1028. (dev_ptr->user_disabled == 0) &&
  1029. (devTp == dev_ptr->dev_type) &&
  1030. (low_count > dev_ptr->dev_caller_count)) {
  1031. low_count = dev_ptr->dev_caller_count;
  1032. low_indx = indx;
  1033. }
  1034. }
  1035. *device_nr_p = low_indx;
  1036. return low_indx;
  1037. }
  1038. static inline int
  1039. send_to_crypto_device(struct work_element *we_p)
  1040. {
  1041. struct caller *caller_p;
  1042. struct device *device_p;
  1043. int dev_nr;
  1044. int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
  1045. if (!we_p->requestptr)
  1046. return SEN_FATAL_ERROR;
  1047. caller_p = (struct caller *)we_p->requestptr;
  1048. dev_nr = we_p->devindex;
  1049. if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
  1050. if (z90crypt.hdware_info->hdware_mask.st_count != 0)
  1051. return SEN_RETRY;
  1052. else
  1053. return SEN_NOT_AVAIL;
  1054. }
  1055. we_p->devindex = dev_nr;
  1056. device_p = z90crypt.device_p[dev_nr];
  1057. if (!device_p)
  1058. return SEN_NOT_AVAIL;
  1059. if (device_p->dev_type != we_p->devtype)
  1060. return SEN_RETRY;
  1061. if (device_p->dev_caller_count >= device_p->dev_q_depth)
  1062. return SEN_QUEUE_FULL;
  1063. PDEBUG("device number prior to send: %d\n", dev_nr);
  1064. switch (send_to_AP(dev_nr, z90crypt.cdx,
  1065. caller_p->caller_dev_dep_req_l,
  1066. caller_p->caller_dev_dep_req_p)) {
  1067. case DEV_SEN_EXCEPTION:
  1068. PRINTKC("Exception during send to device %d\n", dev_nr);
  1069. z90crypt.terminating = 1;
  1070. return SEN_FATAL_ERROR;
  1071. case DEV_GONE:
  1072. PRINTK("Device %d not available\n", dev_nr);
  1073. remove_device(device_p);
  1074. return SEN_NOT_AVAIL;
  1075. case DEV_EMPTY:
  1076. return SEN_NOT_AVAIL;
  1077. case DEV_NO_WORK:
  1078. return SEN_FATAL_ERROR;
  1079. case DEV_BAD_MESSAGE:
  1080. return SEN_USER_ERROR;
  1081. case DEV_QUEUE_FULL:
  1082. return SEN_QUEUE_FULL;
  1083. default:
  1084. case DEV_ONLINE:
  1085. break;
  1086. }
  1087. list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
  1088. device_p->dev_caller_count++;
  1089. return 0;
  1090. }
  1091. /**
  1092. * Send puts the user's work on one of two queues:
  1093. * the pending queue if the send was successful
  1094. * the request queue if the send failed because device full or busy
  1095. */
  1096. static inline int
  1097. z90crypt_send(struct work_element *we_p, const char *buf)
  1098. {
  1099. int rv;
  1100. PDEBUG("PID %d\n", PID());
  1101. if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
  1102. PDEBUG("PID %d tried to send more work but has outstanding "
  1103. "work.\n", PID());
  1104. return -EWORKPEND;
  1105. }
  1106. we_p->devindex = -1; // Reset device number
  1107. spin_lock_irq(&queuespinlock);
  1108. rv = send_to_crypto_device(we_p);
  1109. switch (rv) {
  1110. case 0:
  1111. we_p->requestsent = jiffies;
  1112. we_p->audit[0] |= FP_SENT;
  1113. list_add_tail(&we_p->liste, &pending_list);
  1114. ++pendingq_count;
  1115. we_p->audit[0] |= FP_PENDING;
  1116. break;
  1117. case SEN_BUSY:
  1118. case SEN_QUEUE_FULL:
  1119. rv = 0;
  1120. we_p->devindex = -1; // any device will do
  1121. we_p->requestsent = jiffies;
  1122. list_add_tail(&we_p->liste, &request_list);
  1123. ++requestq_count;
  1124. we_p->audit[0] |= FP_REQUEST;
  1125. break;
  1126. case SEN_RETRY:
  1127. rv = -ERESTARTSYS;
  1128. break;
  1129. case SEN_NOT_AVAIL:
  1130. PRINTK("*** No devices available.\n");
  1131. rv = we_p->retcode = -ENODEV;
  1132. we_p->status[0] |= STAT_FAILED;
  1133. break;
  1134. case REC_OPERAND_INV:
  1135. case REC_OPERAND_SIZE:
  1136. case REC_EVEN_MOD:
  1137. case REC_INVALID_PAD:
  1138. rv = we_p->retcode = -EINVAL;
  1139. we_p->status[0] |= STAT_FAILED;
  1140. break;
  1141. default:
  1142. we_p->retcode = rv;
  1143. we_p->status[0] |= STAT_FAILED;
  1144. break;
  1145. }
  1146. if (rv != -ERESTARTSYS)
  1147. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1148. spin_unlock_irq(&queuespinlock);
  1149. if (rv == 0)
  1150. tasklet_schedule(&reader_tasklet);
  1151. return rv;
  1152. }
  1153. /**
  1154. * process_results copies the user's work from kernel space.
  1155. */
  1156. static inline int
  1157. z90crypt_process_results(struct work_element *we_p, char __user *buf)
  1158. {
  1159. int rv;
  1160. PDEBUG("we_p %p (PID %d)\n", we_p, PID());
  1161. LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
  1162. SET_RDWRMASK(we_p->status[0], STAT_READPEND);
  1163. rv = 0;
  1164. if (!we_p->buffer) {
  1165. PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
  1166. we_p, PID());
  1167. rv = -ENOBUFF;
  1168. }
  1169. if (!rv)
  1170. if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
  1171. PDEBUG("copy_to_user failed: rv = %d\n", rv);
  1172. rv = -EFAULT;
  1173. }
  1174. if (!rv)
  1175. rv = we_p->retcode;
  1176. if (!rv)
  1177. if (we_p->resp_buff_size
  1178. && copy_to_user(we_p->resp_addr, we_p->resp_buff,
  1179. we_p->resp_buff_size))
  1180. rv = -EFAULT;
  1181. SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
  1182. return rv;
  1183. }
  1184. static unsigned char NULL_psmid[8] =
  1185. {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  1186. /**
  1187. * Used in device configuration functions
  1188. */
  1189. #define MAX_RESET 90
  1190. /**
  1191. * This is used only for PCICC support
  1192. */
  1193. static inline int
  1194. is_PKCS11_padded(unsigned char *buffer, int length)
  1195. {
  1196. int i;
  1197. if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
  1198. return 0;
  1199. for (i = 2; i < length; i++)
  1200. if (buffer[i] != 0xFF)
  1201. break;
  1202. if ((i < 10) || (i == length))
  1203. return 0;
  1204. if (buffer[i] != 0x00)
  1205. return 0;
  1206. return 1;
  1207. }
  1208. /**
  1209. * This is used only for PCICC support
  1210. */
  1211. static inline int
  1212. is_PKCS12_padded(unsigned char *buffer, int length)
  1213. {
  1214. int i;
  1215. if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
  1216. return 0;
  1217. for (i = 2; i < length; i++)
  1218. if (buffer[i] == 0x00)
  1219. break;
  1220. if ((i < 10) || (i == length))
  1221. return 0;
  1222. if (buffer[i] != 0x00)
  1223. return 0;
  1224. return 1;
  1225. }
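/**
 * For reference: the two helpers above recognize the standard PKCS#1 v1.5
 * block formats (block types 1 and 2; despite the names, this has nothing
 * to do with PKCS#11 or PKCS#12):
 *
 *   block type 01 (is_PKCS11_padded):  00 01 FF FF ... FF 00 <data>
 *   block type 02 (is_PKCS12_padded):  00 02 <nonzero pad bytes> 00 <data>
 *
 * Both checks also insist on at least eight padding bytes (i >= 10) and a
 * 0x00 separator before the payload.
 */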
  1226. /**
  1227. * builds struct caller and converts message from generic format to
  1228. * device-dependent format
  1229. * func is ICARSAMODEXPO or ICARSACRT
  1230. * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
  1231. */
  1232. static inline int
  1233. build_caller(struct work_element *we_p, short function)
  1234. {
  1235. int rv;
  1236. struct caller *caller_p = (struct caller *)we_p->requestptr;
  1237. if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
  1238. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1239. (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
  1240. return SEN_NOT_AVAIL;
  1241. memcpy(caller_p->caller_id, we_p->caller_id,
  1242. sizeof(caller_p->caller_id));
  1243. caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
  1244. caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
  1245. caller_p->caller_buf_p = we_p->buffer;
  1246. INIT_LIST_HEAD(&(caller_p->caller_liste));
  1247. rv = convert_request(we_p->buffer, we_p->funccode, function,
  1248. z90crypt.cdx, we_p->devtype,
  1249. &caller_p->caller_dev_dep_req_l,
  1250. caller_p->caller_dev_dep_req_p);
  1251. if (rv) {
  1252. if (rv == SEN_NOT_AVAIL)
  1253. PDEBUG("request can't be processed on hdwr avail\n");
  1254. else
  1255. PRINTK("Error from convert_request: %d\n", rv);
  1256. }
  1257. else
  1258. memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
  1259. return rv;
  1260. }
  1261. static inline void
  1262. unbuild_caller(struct device *device_p, struct caller *caller_p)
  1263. {
  1264. if (!caller_p)
  1265. return;
  1266. if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
  1267. if (!list_empty(&caller_p->caller_liste)) {
  1268. list_del_init(&caller_p->caller_liste);
  1269. device_p->dev_caller_count--;
  1270. }
  1271. memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
  1272. }
  1273. static inline int
  1274. get_crypto_request_buffer(struct work_element *we_p)
  1275. {
  1276. struct ica_rsa_modexpo *mex_p;
  1277. struct ica_rsa_modexpo_crt *crt_p;
  1278. unsigned char *temp_buffer;
  1279. short function;
  1280. int rv;
  1281. mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
  1282. crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
  1283. PDEBUG("device type input = %d\n", we_p->devtype);
  1284. if (z90crypt.terminating)
  1285. return REC_NO_RESPONSE;
  1286. if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
  1287. PRINTK("psmid zeroes\n");
  1288. return SEN_FATAL_ERROR;
  1289. }
  1290. if (!we_p->buffer) {
  1291. PRINTK("buffer pointer NULL\n");
  1292. return SEN_USER_ERROR;
  1293. }
  1294. if (!we_p->requestptr) {
  1295. PRINTK("caller pointer NULL\n");
  1296. return SEN_USER_ERROR;
  1297. }
  1298. if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
  1299. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1300. (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
  1301. (we_p->devtype != ANYDEV)) {
  1302. PRINTK("invalid device type\n");
  1303. return SEN_USER_ERROR;
  1304. }
  1305. if ((mex_p->inputdatalength < 1) ||
  1306. (mex_p->inputdatalength > MAX_MOD_SIZE)) {
  1307. PRINTK("inputdatalength[%d] is not valid\n",
  1308. mex_p->inputdatalength);
  1309. return SEN_USER_ERROR;
  1310. }
  1311. if (mex_p->outputdatalength < mex_p->inputdatalength) {
  1312. PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
  1313. mex_p->outputdatalength, mex_p->inputdatalength);
  1314. return SEN_USER_ERROR;
  1315. }
  1316. if (!mex_p->inputdata || !mex_p->outputdata) {
  1317. PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
  1318. mex_p->outputdata, mex_p->inputdata);
  1319. return SEN_USER_ERROR;
  1320. }
  1321. /**
  1322. * As long as outputdatalength is big enough, we can set the
  1323. * outputdatalength equal to the inputdatalength, since that is the
  1324. * number of bytes we will copy in any case
  1325. */
  1326. mex_p->outputdatalength = mex_p->inputdatalength;
  1327. rv = 0;
  1328. switch (we_p->funccode) {
  1329. case ICARSAMODEXPO:
  1330. if (!mex_p->b_key || !mex_p->n_modulus)
  1331. rv = SEN_USER_ERROR;
  1332. break;
  1333. case ICARSACRT:
  1334. if (!IS_EVEN(crt_p->inputdatalength)) {
  1335. PRINTK("inputdatalength[%d] is odd, CRT form\n",
  1336. crt_p->inputdatalength);
  1337. rv = SEN_USER_ERROR;
  1338. break;
  1339. }
  1340. if (!crt_p->bp_key ||
  1341. !crt_p->bq_key ||
  1342. !crt_p->np_prime ||
  1343. !crt_p->nq_prime ||
  1344. !crt_p->u_mult_inv) {
  1345. PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
  1346. crt_p->bp_key, crt_p->bq_key,
  1347. crt_p->np_prime, crt_p->nq_prime,
  1348. crt_p->u_mult_inv);
  1349. rv = SEN_USER_ERROR;
  1350. }
  1351. break;
  1352. default:
  1353. PRINTK("bad func = %d\n", we_p->funccode);
  1354. rv = SEN_USER_ERROR;
  1355. break;
  1356. }
  1357. if (rv != 0)
  1358. return rv;
  1359. if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
  1360. return SEN_NOT_AVAIL;
  1361. temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
  1362. sizeof(struct caller);
  1363. if (copy_from_user(temp_buffer, mex_p->inputdata,
  1364. mex_p->inputdatalength) != 0)
  1365. return SEN_RELEASED;
  1366. function = PCI_FUNC_KEY_ENCRYPT;
  1367. switch (we_p->devtype) {
  1368. /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
  1369. case PCICA:
  1370. case CEX2A:
  1371. function = PCI_FUNC_KEY_ENCRYPT;
  1372. break;
	/**
	 * PCIXCC_MCL2 does all Mod-Expo forms with a simple RSA mod-expo
	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple
	 * RSA mod-expo operation.
	 */
  1379. case PCIXCC_MCL2:
  1380. if (we_p->funccode == ICARSAMODEXPO)
  1381. function = PCI_FUNC_KEY_ENCRYPT;
  1382. else
  1383. function = PCI_FUNC_KEY_DECRYPT;
  1384. break;
  1385. case PCIXCC_MCL3:
  1386. case CEX2C:
  1387. if (we_p->funccode == ICARSAMODEXPO)
  1388. function = PCI_FUNC_KEY_ENCRYPT;
  1389. else
  1390. function = PCI_FUNC_KEY_DECRYPT;
  1391. break;
  1392. /**
  1393. * PCICC does everything as a PKCS-1.2 format request
  1394. */
  1395. case PCICC:
		/* PCICC cannot handle input that is PKCS#1.1 padded */
  1397. if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
  1398. return SEN_NOT_AVAIL;
  1399. }
  1400. if (we_p->funccode == ICARSAMODEXPO) {
  1401. if (is_PKCS12_padded(temp_buffer,
  1402. mex_p->inputdatalength))
  1403. function = PCI_FUNC_KEY_ENCRYPT;
  1404. else
  1405. function = PCI_FUNC_KEY_DECRYPT;
  1406. } else
  1407. /* all CRT forms are decrypts */
  1408. function = PCI_FUNC_KEY_DECRYPT;
  1409. break;
  1410. }
  1411. PDEBUG("function: %04x\n", function);
  1412. rv = build_caller(we_p, function);
  1413. PDEBUG("rv from build_caller = %d\n", rv);
  1414. return rv;
  1415. }
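/**
 * Summary: get_crypto_request_buffer() validates the work element (non-zero
 * PSMID, buffer and caller pointers, device type, input/output lengths and
 * key pointers), copies the caller's input data into the scratch area behind
 * the work_element and caller structures, picks PCI_FUNC_KEY_ENCRYPT or
 * PCI_FUNC_KEY_DECRYPT based on device type and padding, and finally calls
 * build_caller() to produce the device-dependent request.
 */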
  1416. static inline int
  1417. z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
  1418. const char __user *buffer)
  1419. {
  1420. int rv;
  1421. we_p->devindex = -1;
  1422. if (funccode == ICARSAMODEXPO)
  1423. we_p->buff_size = sizeof(struct ica_rsa_modexpo);
  1424. else
  1425. we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
  1426. if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
  1427. return -EFAULT;
  1428. we_p->audit[0] |= FP_COPYFROM;
  1429. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1430. we_p->funccode = funccode;
  1431. we_p->devtype = -1;
  1432. we_p->audit[0] |= FP_BUFFREQ;
  1433. rv = get_crypto_request_buffer(we_p);
  1434. switch (rv) {
  1435. case 0:
  1436. we_p->audit[0] |= FP_BUFFGOT;
  1437. break;
  1438. case SEN_USER_ERROR:
  1439. rv = -EINVAL;
  1440. break;
  1441. case SEN_QUEUE_FULL:
  1442. rv = 0;
  1443. break;
  1444. case SEN_RELEASED:
  1445. rv = -EFAULT;
  1446. break;
  1447. case REC_NO_RESPONSE:
  1448. rv = -ENODEV;
  1449. break;
  1450. case SEN_NOT_AVAIL:
  1451. case EGETBUFF:
  1452. rv = -EGETBUFF;
  1453. break;
  1454. default:
  1455. PRINTK("rv = %d\n", rv);
  1456. rv = -EGETBUFF;
  1457. break;
  1458. }
  1459. if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
  1460. SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
  1461. return rv;
  1462. }
  1463. static inline void
  1464. purge_work_element(struct work_element *we_p)
  1465. {
  1466. struct list_head *lptr;
  1467. spin_lock_irq(&queuespinlock);
  1468. list_for_each(lptr, &request_list) {
  1469. if (lptr == &we_p->liste) {
  1470. list_del_init(lptr);
  1471. requestq_count--;
  1472. break;
  1473. }
  1474. }
  1475. list_for_each(lptr, &pending_list) {
  1476. if (lptr == &we_p->liste) {
  1477. list_del_init(lptr);
  1478. pendingq_count--;
  1479. break;
  1480. }
  1481. }
  1482. spin_unlock_irq(&queuespinlock);
  1483. }
  1484. /**
  1485. * Build the request and send it.
  1486. */
  1487. static inline int
  1488. z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
  1489. unsigned int cmd, unsigned long arg)
  1490. {
  1491. struct work_element *we_p;
  1492. int rv;
  1493. if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
  1494. PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
  1495. return rv;
  1496. }
  1497. if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
  1498. PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
  1499. if (!rv)
  1500. if ((rv = z90crypt_send(we_p, (const char *)arg)))
  1501. PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
  1502. if (!rv) {
  1503. we_p->audit[0] |= FP_ASLEEP;
  1504. wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
  1505. we_p->audit[0] |= FP_AWAKE;
  1506. rv = we_p->retcode;
  1507. }
  1508. if (!rv)
  1509. rv = z90crypt_process_results(we_p, (char __user *)arg);
  1510. if ((we_p->status[0] & STAT_FAILED)) {
  1511. switch (rv) {
  1512. /**
  1513. * EINVAL *after* receive is almost always a padding error or
  1514. * length error issued by a coprocessor (not an accelerator).
  1515. * We convert this return value to -EGETBUFF which should
  1516. * trigger a fallback to software.
  1517. */
  1518. case -EINVAL:
  1519. if ((we_p->devtype != PCICA) &&
  1520. (we_p->devtype != CEX2A))
  1521. rv = -EGETBUFF;
  1522. break;
  1523. case -ETIMEOUT:
  1524. if (z90crypt.mask.st_count > 0)
  1525. rv = -ERESTARTSYS; // retry with another
  1526. else
  1527. rv = -ENODEV; // no cards left
  1528. /* fall through to clean up request queue */
  1529. case -ERESTARTSYS:
  1530. case -ERELEASED:
  1531. switch (CHK_RDWRMASK(we_p->status[0])) {
  1532. case STAT_WRITTEN:
  1533. purge_work_element(we_p);
  1534. break;
  1535. case STAT_READPEND:
  1536. case STAT_NOWORK:
  1537. default:
  1538. break;
  1539. }
  1540. break;
  1541. default:
  1542. we_p->status[0] ^= STAT_FAILED;
  1543. break;
  1544. }
  1545. }
  1546. free_page((long)we_p);
  1547. return rv;
  1548. }
  1549. /**
  1550. * This function is a little long, but it's really just one large switch
  1551. * statement.
  1552. */
  1553. static long
  1554. z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1555. {
  1556. struct priv_data *private_data_p = filp->private_data;
  1557. unsigned char *status;
  1558. unsigned char *qdepth;
  1559. unsigned int *reqcnt;
  1560. struct ica_z90_status *pstat;
  1561. int ret, i, loopLim, tempstat;
  1562. static int deprecated_msg_count1 = 0;
  1563. static int deprecated_msg_count2 = 0;
  1564. PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
  1565. PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
  1566. cmd,
  1567. !_IOC_DIR(cmd) ? "NO"
  1568. : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
  1569. : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
  1570. : "WR")),
  1571. _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
  1572. if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
  1573. PRINTK("cmd 0x%08X contains bad magic\n", cmd);
  1574. return -ENOTTY;
  1575. }
  1576. ret = 0;
  1577. switch (cmd) {
  1578. case ICARSAMODEXPO:
  1579. case ICARSACRT:
  1580. if (quiesce_z90crypt) {
  1581. ret = -EQUIESCE;
  1582. break;
  1583. }
  1584. ret = -ENODEV; // Default if no devices
  1585. loopLim = z90crypt.hdware_info->hdware_mask.st_count -
  1586. (z90crypt.hdware_info->hdware_mask.disabled_count +
  1587. z90crypt.hdware_info->hdware_mask.user_disabled_count);
  1588. for (i = 0; i < loopLim; i++) {
  1589. ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
  1590. if (ret != -ERESTARTSYS)
  1591. break;
  1592. }
  1593. if (ret == -ERESTARTSYS)
  1594. ret = -ENODEV;
  1595. break;
  1596. case Z90STAT_TOTALCOUNT:
  1597. tempstat = get_status_totalcount();
  1598. if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
  1599. ret = -EFAULT;
  1600. break;
  1601. case Z90STAT_PCICACOUNT:
  1602. tempstat = get_status_PCICAcount();
  1603. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1604. ret = -EFAULT;
  1605. break;
  1606. case Z90STAT_PCICCCOUNT:
  1607. tempstat = get_status_PCICCcount();
  1608. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1609. ret = -EFAULT;
  1610. break;
  1611. case Z90STAT_PCIXCCMCL2COUNT:
  1612. tempstat = get_status_PCIXCCMCL2count();
  1613. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1614. ret = -EFAULT;
  1615. break;
  1616. case Z90STAT_PCIXCCMCL3COUNT:
  1617. tempstat = get_status_PCIXCCMCL3count();
  1618. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1619. ret = -EFAULT;
  1620. break;
  1621. case Z90STAT_CEX2CCOUNT:
  1622. tempstat = get_status_CEX2Ccount();
  1623. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1624. ret = -EFAULT;
  1625. break;
  1626. case Z90STAT_CEX2ACOUNT:
  1627. tempstat = get_status_CEX2Acount();
  1628. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1629. ret = -EFAULT;
  1630. break;
  1631. case Z90STAT_REQUESTQ_COUNT:
  1632. tempstat = get_status_requestq_count();
  1633. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1634. ret = -EFAULT;
  1635. break;
  1636. case Z90STAT_PENDINGQ_COUNT:
  1637. tempstat = get_status_pendingq_count();
  1638. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1639. ret = -EFAULT;
  1640. break;
  1641. case Z90STAT_TOTALOPEN_COUNT:
  1642. tempstat = get_status_totalopen_count();
  1643. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1644. ret = -EFAULT;
  1645. break;
  1646. case Z90STAT_DOMAIN_INDEX:
  1647. tempstat = get_status_domain_index();
  1648. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1649. ret = -EFAULT;
  1650. break;
  1651. case Z90STAT_STATUS_MASK:
  1652. status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1653. if (!status) {
  1654. PRINTK("kmalloc for status failed!\n");
  1655. ret = -ENOMEM;
  1656. break;
  1657. }
  1658. get_status_status_mask(status);
  1659. if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
  1660. != 0)
  1661. ret = -EFAULT;
  1662. kfree(status);
  1663. break;
  1664. case Z90STAT_QDEPTH_MASK:
  1665. qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1666. if (!qdepth) {
  1667. PRINTK("kmalloc for qdepth failed!\n");
  1668. ret = -ENOMEM;
  1669. break;
  1670. }
  1671. get_status_qdepth_mask(qdepth);
  1672. if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
  1673. ret = -EFAULT;
  1674. kfree(qdepth);
  1675. break;
  1676. case Z90STAT_PERDEV_REQCNT:
  1677. reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
  1678. if (!reqcnt) {
  1679. PRINTK("kmalloc for reqcnt failed!\n");
  1680. ret = -ENOMEM;
  1681. break;
  1682. }
  1683. get_status_perdevice_reqcnt(reqcnt);
  1684. if (copy_to_user((char __user *) arg, reqcnt,
  1685. Z90CRYPT_NUM_APS * sizeof(int)) != 0)
  1686. ret = -EFAULT;
  1687. kfree(reqcnt);
  1688. break;
  1689. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1690. case ICAZ90STATUS:
  1691. if (deprecated_msg_count1 < 20) {
  1692. PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
  1693. deprecated_msg_count1++;
  1694. if (deprecated_msg_count1 == 20)
  1695. PRINTK("No longer issuing messages related to "
  1696. "deprecated call to ICAZ90STATUS.\n");
  1697. }
  1698. pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
  1699. if (!pstat) {
  1700. PRINTK("kmalloc for pstat failed!\n");
  1701. ret = -ENOMEM;
  1702. break;
  1703. }
  1704. pstat->totalcount = get_status_totalcount();
  1705. pstat->leedslitecount = get_status_PCICAcount();
  1706. pstat->leeds2count = get_status_PCICCcount();
  1707. pstat->requestqWaitCount = get_status_requestq_count();
  1708. pstat->pendingqWaitCount = get_status_pendingq_count();
  1709. pstat->totalOpenCount = get_status_totalopen_count();
  1710. pstat->cryptoDomain = get_status_domain_index();
  1711. get_status_status_mask(pstat->status);
  1712. get_status_qdepth_mask(pstat->qdepth);
  1713. if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
  1714. sizeof(struct ica_z90_status)) != 0)
  1715. ret = -EFAULT;
  1716. kfree(pstat);
  1717. break;
  1718. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1719. case Z90STAT_PCIXCCCOUNT:
  1720. if (deprecated_msg_count2 < 20) {
  1721. PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
  1722. deprecated_msg_count2++;
  1723. if (deprecated_msg_count2 == 20)
  1724. PRINTK("No longer issuing messages about depre"
  1725. "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
  1726. }
  1727. tempstat = get_status_PCIXCCcount();
  1728. if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0)
  1729. ret = -EFAULT;
  1730. break;
  1731. case Z90QUIESCE:
  1732. if (current->euid != 0) {
  1733. PRINTK("QUIESCE fails: euid %d\n",
  1734. current->euid);
  1735. ret = -EACCES;
  1736. } else {
  1737. PRINTK("QUIESCE device from PID %d\n", PID());
  1738. quiesce_z90crypt = 1;
  1739. }
  1740. break;
  1741. default:
  1742. /* user passed an invalid IOCTL number */
  1743. PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
  1744. ret = -ENOTTY;
  1745. break;
  1746. }
  1747. return ret;
  1748. }
  1749. static inline int
  1750. sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1751. {
  1752. int hl, i;
  1753. hl = 0;
  1754. for (i = 0; i < len; i++)
  1755. hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
  1756. hl += sprintf(outaddr+hl, " ");
  1757. return hl;
  1758. }
  1759. static inline int
  1760. sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1761. {
  1762. int hl, inl, c, cx;
  1763. hl = sprintf(outaddr, " ");
  1764. inl = 0;
  1765. for (c = 0; c < (len / 16); c++) {
  1766. hl += sprintcl(outaddr+hl, addr+inl, 16);
  1767. inl += 16;
  1768. }
  1769. cx = len%16;
  1770. if (cx) {
  1771. hl += sprintcl(outaddr+hl, addr+inl, cx);
  1772. inl += cx;
  1773. }
  1774. hl += sprintf(outaddr+hl, "\n");
  1775. return hl;
  1776. }
  1777. static inline int
  1778. sprinthx(unsigned char *title, unsigned char *outaddr,
  1779. unsigned char *addr, unsigned int len)
  1780. {
  1781. int hl, inl, r, rx;
  1782. hl = sprintf(outaddr, "\n%s\n", title);
  1783. inl = 0;
  1784. for (r = 0; r < (len / 64); r++) {
  1785. hl += sprintrw(outaddr+hl, addr+inl, 64);
  1786. inl += 64;
  1787. }
  1788. rx = len % 64;
  1789. if (rx) {
  1790. hl += sprintrw(outaddr+hl, addr+inl, rx);
  1791. inl += rx;
  1792. }
  1793. hl += sprintf(outaddr+hl, "\n");
  1794. return hl;
  1795. }
  1796. static inline int
  1797. sprinthx4(unsigned char *title, unsigned char *outaddr,
  1798. unsigned int *array, unsigned int len)
  1799. {
  1800. int hl, r;
  1801. hl = sprintf(outaddr, "\n%s\n", title);
  1802. for (r = 0; r < len; r++) {
  1803. if ((r % 8) == 0)
  1804. hl += sprintf(outaddr+hl, " ");
  1805. hl += sprintf(outaddr+hl, "%08X ", array[r]);
  1806. if ((r % 8) == 7)
  1807. hl += sprintf(outaddr+hl, "\n");
  1808. }
  1809. hl += sprintf(outaddr+hl, "\n");
  1810. return hl;
  1811. }
  1812. static int
  1813. z90crypt_status(char *resp_buff, char **start, off_t offset,
  1814. int count, int *eof, void *data)
  1815. {
  1816. unsigned char *workarea;
  1817. int len;
  1818. /* resp_buff is a page. Use the right half for a work area */
  1819. workarea = resp_buff+2000;
  1820. len = 0;
  1821. len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
  1822. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
  1823. len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
  1824. get_status_domain_index());
  1825. len += sprintf(resp_buff+len, "Total device count: %d\n",
  1826. get_status_totalcount());
  1827. len += sprintf(resp_buff+len, "PCICA count: %d\n",
  1828. get_status_PCICAcount());
  1829. len += sprintf(resp_buff+len, "PCICC count: %d\n",
  1830. get_status_PCICCcount());
  1831. len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
  1832. get_status_PCIXCCMCL2count());
  1833. len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
  1834. get_status_PCIXCCMCL3count());
  1835. len += sprintf(resp_buff+len, "CEX2C count: %d\n",
  1836. get_status_CEX2Ccount());
  1837. len += sprintf(resp_buff+len, "CEX2A count: %d\n",
  1838. get_status_CEX2Acount());
  1839. len += sprintf(resp_buff+len, "requestq count: %d\n",
  1840. get_status_requestq_count());
  1841. len += sprintf(resp_buff+len, "pendingq count: %d\n",
  1842. get_status_pendingq_count());
  1843. len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
  1844. get_status_totalopen_count());
  1845. len += sprinthx(
  1846. "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
  1847. "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
  1848. resp_buff+len,
  1849. get_status_status_mask(workarea),
  1850. Z90CRYPT_NUM_APS);
  1851. len += sprinthx("Waiting work element counts",
  1852. resp_buff+len,
  1853. get_status_qdepth_mask(workarea),
  1854. Z90CRYPT_NUM_APS);
  1855. len += sprinthx4(
  1856. "Per-device successfully completed request counts",
  1857. resp_buff+len,
  1858. get_status_perdevice_reqcnt((unsigned int *)workarea),
  1859. Z90CRYPT_NUM_APS);
  1860. *eof = 1;
  1861. memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
  1862. return len;
  1863. }
  1864. static inline void
  1865. disable_card(int card_index)
  1866. {
  1867. struct device *devp;
  1868. devp = LONG2DEVPTR(card_index);
  1869. if (!devp || devp->user_disabled)
  1870. return;
  1871. devp->user_disabled = 1;
  1872. z90crypt.hdware_info->hdware_mask.user_disabled_count++;
  1873. if (devp->dev_type == -1)
  1874. return;
  1875. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
  1876. }
  1877. static inline void
  1878. enable_card(int card_index)
  1879. {
  1880. struct device *devp;
  1881. devp = LONG2DEVPTR(card_index);
  1882. if (!devp || !devp->user_disabled)
  1883. return;
  1884. devp->user_disabled = 0;
  1885. z90crypt.hdware_info->hdware_mask.user_disabled_count--;
  1886. if (devp->dev_type == -1)
  1887. return;
  1888. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
  1889. }
  1890. static int
  1891. z90crypt_status_write(struct file *file, const char __user *buffer,
  1892. unsigned long count, void *data)
  1893. {
  1894. int j, eol;
  1895. unsigned char *lbuf, *ptr;
  1896. unsigned int local_count;
  1897. #define LBUFSIZE 1200
  1898. lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
  1899. if (!lbuf) {
  1900. PRINTK("kmalloc failed!\n");
  1901. return 0;
  1902. }
	if (count <= 0) {
		kfree(lbuf);	/* don't leak the buffer on an empty write */
		return 0;
	}
  1905. local_count = UMIN((unsigned int)count, LBUFSIZE-1);
  1906. if (copy_from_user(lbuf, buffer, local_count) != 0) {
  1907. kfree(lbuf);
  1908. return -EFAULT;
  1909. }
  1910. lbuf[local_count] = '\0';
  1911. ptr = strstr(lbuf, "Online devices");
  1912. if (ptr == 0) {
  1913. PRINTK("Unable to parse data (missing \"Online devices\")\n");
  1914. kfree(lbuf);
  1915. return count;
  1916. }
  1917. ptr = strstr(ptr, "\n");
  1918. if (ptr == 0) {
  1919. PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
  1920. kfree(lbuf);
  1921. return count;
  1922. }
  1923. ptr++;
  1924. if (strstr(ptr, "Waiting work element counts") == NULL) {
  1925. PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
  1926. kfree(lbuf);
  1927. return count;
  1928. }
  1929. j = 0;
  1930. eol = 0;
  1931. while ((j < 64) && (*ptr != '\0')) {
  1932. switch (*ptr) {
  1933. case '\t':
  1934. case ' ':
  1935. break;
  1936. case '\n':
  1937. default:
  1938. eol = 1;
  1939. break;
  1940. case '0': // no device
  1941. case '1': // PCICA
  1942. case '2': // PCICC
  1943. case '3': // PCIXCC_MCL2
  1944. case '4': // PCIXCC_MCL3
  1945. case '5': // CEX2C
  1946. case '6': // CEX2A
  1947. j++;
  1948. break;
  1949. case 'd':
  1950. case 'D':
  1951. disable_card(j);
  1952. j++;
  1953. break;
  1954. case 'e':
  1955. case 'E':
  1956. enable_card(j);
  1957. j++;
  1958. break;
  1959. }
  1960. if (eol)
  1961. break;
  1962. ptr++;
  1963. }
  1964. kfree(lbuf);
  1965. return count;
  1966. }
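/**
 * Usage sketch (the proc path is assumed here, not taken from this file):
 * the write handler above expects text in the same shape as the read-side
 * status output. A card is toggled by rewriting the "Online devices" map
 * with a 'd'/'D' (disable) or 'e'/'E' (enable) in that card's position:
 *
 *   cat /proc/driver/z90crypt > /tmp/z90.status
 *   (change the card's digit in the "Online devices" line to 'd' or 'e')
 *   cat /tmp/z90.status > /proc/driver/z90crypt
 */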
  1967. /**
  1968. * Functions that run under a timer, with no process id
  1969. *
  1970. * The task functions:
  1971. * z90crypt_reader_task
  1972. * helper_send_work
  1973. * helper_handle_work_element
  1974. * helper_receive_rc
  1975. * z90crypt_config_task
  1976. * z90crypt_cleanup_task
  1977. *
  1978. * Helper functions:
  1979. * z90crypt_schedule_reader_timer
  1980. * z90crypt_schedule_reader_task
  1981. * z90crypt_schedule_config_task
  1982. * z90crypt_schedule_cleanup_task
  1983. */
  1984. static inline int
  1985. receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
  1986. unsigned char *buff, unsigned char __user **dest_p_p)
  1987. {
  1988. int dv, rv;
  1989. struct device *dev_ptr;
  1990. struct caller *caller_p;
  1991. struct ica_rsa_modexpo *icaMsg_p;
  1992. struct list_head *ptr, *tptr;
  1993. memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
  1994. if (z90crypt.terminating)
  1995. return REC_FATAL_ERROR;
  1996. caller_p = 0;
  1997. dev_ptr = z90crypt.device_p[index];
  1998. rv = 0;
  1999. do {
  2000. if (!dev_ptr || dev_ptr->disabled) {
  2001. rv = REC_NO_WORK; // a disabled device can't return work
  2002. break;
  2003. }
  2004. if (dev_ptr->dev_self_x != index) {
  2005. PRINTKC("Corrupt dev ptr\n");
  2006. z90crypt.terminating = 1;
  2007. rv = REC_FATAL_ERROR;
  2008. break;
  2009. }
  2010. if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
  2011. dv = DEV_REC_EXCEPTION;
  2012. PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
  2013. dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
  2014. } else {
  2015. PDEBUG("Dequeue called for device %d\n", index);
  2016. dv = receive_from_AP(index, z90crypt.cdx,
  2017. dev_ptr->dev_resp_l,
  2018. dev_ptr->dev_resp_p, psmid);
  2019. }
  2020. switch (dv) {
  2021. case DEV_REC_EXCEPTION:
  2022. rv = REC_FATAL_ERROR;
  2023. z90crypt.terminating = 1;
  2024. PRINTKC("Exception in receive from device %d\n",
  2025. index);
  2026. break;
  2027. case DEV_ONLINE:
  2028. rv = 0;
  2029. break;
  2030. case DEV_EMPTY:
  2031. rv = REC_EMPTY;
  2032. break;
  2033. case DEV_NO_WORK:
  2034. rv = REC_NO_WORK;
  2035. break;
  2036. case DEV_BAD_MESSAGE:
  2037. case DEV_GONE:
  2038. case REC_HARDWAR_ERR:
  2039. default:
  2040. rv = REC_NO_RESPONSE;
  2041. break;
  2042. }
  2043. if (rv)
  2044. break;
  2045. if (dev_ptr->dev_caller_count <= 0) {
  2046. rv = REC_USER_GONE;
  2047. break;
  2048. }
  2049. list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
  2050. caller_p = list_entry(ptr, struct caller, caller_liste);
  2051. if (!memcmp(caller_p->caller_id, psmid,
  2052. sizeof(caller_p->caller_id))) {
  2053. if (!list_empty(&caller_p->caller_liste)) {
  2054. list_del_init(ptr);
  2055. dev_ptr->dev_caller_count--;
  2056. break;
  2057. }
  2058. }
  2059. caller_p = 0;
  2060. }
  2061. if (!caller_p) {
  2062. PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
  2063. "%02X%02X%02X in device list\n",
  2064. psmid[0], psmid[1], psmid[2], psmid[3],
  2065. psmid[4], psmid[5], psmid[6], psmid[7]);
  2066. rv = REC_USER_GONE;
  2067. break;
  2068. }
  2069. PDEBUG("caller_p after successful receive: %p\n", caller_p);
  2070. rv = convert_response(dev_ptr->dev_resp_p,
  2071. caller_p->caller_buf_p, buff_len_p, buff);
  2072. switch (rv) {
  2073. case REC_USE_PCICA:
  2074. break;
  2075. case REC_OPERAND_INV:
  2076. case REC_OPERAND_SIZE:
  2077. case REC_EVEN_MOD:
  2078. case REC_INVALID_PAD:
  2079. PDEBUG("device %d: 'user error' %d\n", index, rv);
  2080. break;
  2081. case WRONG_DEVICE_TYPE:
  2082. case REC_HARDWAR_ERR:
  2083. case REC_BAD_MESSAGE:
  2084. PRINTKW("device %d: hardware error %d\n", index, rv);
  2085. rv = REC_NO_RESPONSE;
  2086. break;
  2087. default:
  2088. PDEBUG("device %d: rv = %d\n", index, rv);
  2089. break;
  2090. }
  2091. } while (0);
  2092. switch (rv) {
  2093. case 0:
  2094. PDEBUG("Successful receive from device %d\n", index);
  2095. icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
  2096. *dest_p_p = icaMsg_p->outputdata;
  2097. if (*buff_len_p == 0)
  2098. PRINTK("Zero *buff_len_p\n");
  2099. break;
  2100. case REC_NO_RESPONSE:
  2101. PRINTKW("Removing device %d from availability\n", index);
  2102. remove_device(dev_ptr);
  2103. break;
  2104. }
  2105. if (caller_p)
  2106. unbuild_caller(dev_ptr, caller_p);
  2107. return rv;
  2108. }
  2109. static inline void
  2110. helper_send_work(int index)
  2111. {
  2112. struct work_element *rq_p;
  2113. int rv;
  2114. if (list_empty(&request_list))
  2115. return;
  2116. requestq_count--;
  2117. rq_p = list_entry(request_list.next, struct work_element, liste);
  2118. list_del_init(&rq_p->liste);
  2119. rq_p->audit[1] |= FP_REMREQUEST;
  2120. if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
  2121. rq_p->devindex = SHRT2LONG(index);
  2122. rv = send_to_crypto_device(rq_p);
  2123. if (rv == 0) {
  2124. rq_p->requestsent = jiffies;
  2125. rq_p->audit[0] |= FP_SENT;
  2126. list_add_tail(&rq_p->liste, &pending_list);
  2127. ++pendingq_count;
  2128. rq_p->audit[0] |= FP_PENDING;
  2129. } else {
  2130. switch (rv) {
  2131. case REC_OPERAND_INV:
  2132. case REC_OPERAND_SIZE:
  2133. case REC_EVEN_MOD:
  2134. case REC_INVALID_PAD:
  2135. rq_p->retcode = -EINVAL;
  2136. break;
  2137. case SEN_NOT_AVAIL:
  2138. case SEN_RETRY:
  2139. case REC_NO_RESPONSE:
  2140. default:
  2141. if (z90crypt.mask.st_count > 1)
  2142. rq_p->retcode =
  2143. -ERESTARTSYS;
  2144. else
  2145. rq_p->retcode = -ENODEV;
  2146. break;
  2147. }
  2148. rq_p->status[0] |= STAT_FAILED;
  2149. rq_p->audit[1] |= FP_AWAKENING;
  2150. atomic_set(&rq_p->alarmrung, 1);
  2151. wake_up(&rq_p->waitq);
  2152. }
  2153. } else {
  2154. if (z90crypt.mask.st_count > 1)
  2155. rq_p->retcode = -ERESTARTSYS;
  2156. else
  2157. rq_p->retcode = -ENODEV;
  2158. rq_p->status[0] |= STAT_FAILED;
  2159. rq_p->audit[1] |= FP_AWAKENING;
  2160. atomic_set(&rq_p->alarmrung, 1);
  2161. wake_up(&rq_p->waitq);
  2162. }
  2163. }
  2164. static inline void
  2165. helper_handle_work_element(int index, unsigned char psmid[8], int rc,
  2166. int buff_len, unsigned char *buff,
  2167. unsigned char __user *resp_addr)
  2168. {
  2169. struct work_element *pq_p;
  2170. struct list_head *lptr, *tptr;
  2171. pq_p = 0;
  2172. list_for_each_safe(lptr, tptr, &pending_list) {
  2173. pq_p = list_entry(lptr, struct work_element, liste);
  2174. if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
  2175. list_del_init(lptr);
  2176. pendingq_count--;
  2177. pq_p->audit[1] |= FP_NOTPENDING;
  2178. break;
  2179. }
  2180. pq_p = 0;
  2181. }
  2182. if (!pq_p) {
  2183. PRINTK("device %d has work but no caller exists on pending Q\n",
  2184. SHRT2LONG(index));
  2185. return;
  2186. }
  2187. switch (rc) {
  2188. case 0:
  2189. pq_p->resp_buff_size = buff_len;
  2190. pq_p->audit[1] |= FP_RESPSIZESET;
  2191. if (buff_len) {
  2192. pq_p->resp_addr = resp_addr;
  2193. pq_p->audit[1] |= FP_RESPADDRCOPIED;
  2194. memcpy(pq_p->resp_buff, buff, buff_len);
  2195. pq_p->audit[1] |= FP_RESPBUFFCOPIED;
  2196. }
  2197. break;
  2198. case REC_OPERAND_INV:
  2199. case REC_OPERAND_SIZE:
  2200. case REC_EVEN_MOD:
  2201. case REC_INVALID_PAD:
  2202. PDEBUG("-EINVAL after application error %d\n", rc);
  2203. pq_p->retcode = -EINVAL;
  2204. pq_p->status[0] |= STAT_FAILED;
  2205. break;
  2206. case REC_USE_PCICA:
  2207. pq_p->retcode = -ERESTARTSYS;
  2208. pq_p->status[0] |= STAT_FAILED;
  2209. break;
  2210. case REC_NO_RESPONSE:
  2211. default:
  2212. if (z90crypt.mask.st_count > 1)
  2213. pq_p->retcode = -ERESTARTSYS;
  2214. else
  2215. pq_p->retcode = -ENODEV;
  2216. pq_p->status[0] |= STAT_FAILED;
  2217. break;
  2218. }
  2219. if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
  2220. pq_p->audit[1] |= FP_AWAKENING;
  2221. atomic_set(&pq_p->alarmrung, 1);
  2222. wake_up(&pq_p->waitq);
  2223. }
  2224. }
  2225. /**
  2226. * return TRUE if the work element should be removed from the queue
  2227. */
  2228. static inline int
  2229. helper_receive_rc(int index, int *rc_p)
  2230. {
  2231. switch (*rc_p) {
  2232. case 0:
  2233. case REC_OPERAND_INV:
  2234. case REC_OPERAND_SIZE:
  2235. case REC_EVEN_MOD:
  2236. case REC_INVALID_PAD:
  2237. case REC_USE_PCICA:
  2238. break;
  2239. case REC_BUSY:
  2240. case REC_NO_WORK:
  2241. case REC_EMPTY:
  2242. case REC_RETRY_DEV:
  2243. case REC_FATAL_ERROR:
  2244. return 0;
  2245. case REC_NO_RESPONSE:
  2246. break;
  2247. default:
  2248. PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
  2249. *rc_p, SHRT2LONG(index));
  2250. *rc_p = REC_NO_RESPONSE;
  2251. break;
  2252. }
  2253. return 1;
  2254. }
  2255. static inline void
  2256. z90crypt_schedule_reader_timer(void)
  2257. {
  2258. if (timer_pending(&reader_timer))
  2259. return;
  2260. if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
  2261. PRINTK("Timer pending while modifying reader timer\n");
  2262. }
  2263. static void
  2264. z90crypt_reader_task(unsigned long ptr)
  2265. {
  2266. int workavail, index, rc, buff_len;
  2267. unsigned char psmid[8];
  2268. unsigned char __user *resp_addr;
  2269. static unsigned char buff[1024];
  2270. /**
  2271. * we use workavail = 2 to ensure 2 passes with nothing dequeued before
  2272. * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
  2273. * loop, there is no work remaining on the queues.
  2274. */
  2275. resp_addr = 0;
  2276. workavail = 2;
  2277. buff_len = 0;
  2278. while (workavail) {
  2279. workavail--;
  2280. rc = 0;
  2281. spin_lock_irq(&queuespinlock);
  2282. memset(buff, 0x00, sizeof(buff));
  2283. /* Dequeue once from each device in round robin. */
  2284. for (index = 0; index < z90crypt.mask.st_count; index++) {
  2285. PDEBUG("About to receive.\n");
  2286. rc = receive_from_crypto_device(SHRT2LONG(index),
  2287. psmid,
  2288. &buff_len,
  2289. buff,
  2290. &resp_addr);
  2291. PDEBUG("Dequeued: rc = %d.\n", rc);
  2292. if (helper_receive_rc(index, &rc)) {
  2293. if (rc != REC_NO_RESPONSE) {
  2294. helper_send_work(index);
  2295. workavail = 2;
  2296. }
  2297. helper_handle_work_element(index, psmid, rc,
  2298. buff_len, buff,
  2299. resp_addr);
  2300. }
  2301. if (rc == REC_FATAL_ERROR)
  2302. PRINTKW("REC_FATAL_ERROR from device %d!\n",
  2303. SHRT2LONG(index));
  2304. }
  2305. spin_unlock_irq(&queuespinlock);
  2306. }
  2307. if (pendingq_count + requestq_count)
  2308. z90crypt_schedule_reader_timer();
  2309. }
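/**
 * In short: the reader tasklet polls every online device in round-robin
 * order, matches each reply to its caller by PSMID, refills the device from
 * request_list whenever something other than REC_NO_RESPONSE was drained,
 * and re-arms reader_timer if either queue is still non-empty when it
 * finishes.
 */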
  2310. static inline void
  2311. z90crypt_schedule_config_task(unsigned int expiration)
  2312. {
  2313. if (timer_pending(&config_timer))
  2314. return;
  2315. if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
  2316. PRINTK("Timer pending while modifying config timer\n");
  2317. }
  2318. static void
  2319. z90crypt_config_task(unsigned long ptr)
  2320. {
  2321. int rc;
  2322. PDEBUG("jiffies %ld\n", jiffies);
  2323. if ((rc = refresh_z90crypt(&z90crypt.cdx)))
  2324. PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
  2325. /* If return was fatal, don't bother reconfiguring */
  2326. if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
  2327. z90crypt_schedule_config_task(CONFIGTIME);
  2328. }
  2329. static inline void
  2330. z90crypt_schedule_cleanup_task(void)
  2331. {
  2332. if (timer_pending(&cleanup_timer))
  2333. return;
  2334. if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
  2335. PRINTK("Timer pending while modifying cleanup timer\n");
  2336. }
  2337. static inline void
  2338. helper_drain_queues(void)
  2339. {
  2340. struct work_element *pq_p;
  2341. struct list_head *lptr, *tptr;
  2342. list_for_each_safe(lptr, tptr, &pending_list) {
  2343. pq_p = list_entry(lptr, struct work_element, liste);
  2344. pq_p->retcode = -ENODEV;
  2345. pq_p->status[0] |= STAT_FAILED;
  2346. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2347. (struct caller *)pq_p->requestptr);
  2348. list_del_init(lptr);
  2349. pendingq_count--;
  2350. pq_p->audit[1] |= FP_NOTPENDING;
  2351. pq_p->audit[1] |= FP_AWAKENING;
  2352. atomic_set(&pq_p->alarmrung, 1);
  2353. wake_up(&pq_p->waitq);
  2354. }
  2355. list_for_each_safe(lptr, tptr, &request_list) {
  2356. pq_p = list_entry(lptr, struct work_element, liste);
  2357. pq_p->retcode = -ENODEV;
  2358. pq_p->status[0] |= STAT_FAILED;
  2359. list_del_init(lptr);
  2360. requestq_count--;
  2361. pq_p->audit[1] |= FP_REMREQUEST;
  2362. pq_p->audit[1] |= FP_AWAKENING;
  2363. atomic_set(&pq_p->alarmrung, 1);
  2364. wake_up(&pq_p->waitq);
  2365. }
  2366. }
  2367. static inline void
  2368. helper_timeout_requests(void)
  2369. {
  2370. struct work_element *pq_p;
  2371. struct list_head *lptr, *tptr;
  2372. long timelimit;
  2373. timelimit = jiffies - (CLEANUPTIME * HZ);
  2374. /* The list is in strict chronological order */
  2375. list_for_each_safe(lptr, tptr, &pending_list) {
  2376. pq_p = list_entry(lptr, struct work_element, liste);
  2377. if (pq_p->requestsent >= timelimit)
  2378. break;
  2379. PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2380. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2381. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2382. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2383. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2384. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2385. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2386. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2387. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2388. pq_p->retcode = -ETIMEOUT;
  2389. pq_p->status[0] |= STAT_FAILED;
  2390. /* get this off any caller queue it may be on */
  2391. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2392. (struct caller *) pq_p->requestptr);
  2393. list_del_init(lptr);
  2394. pendingq_count--;
  2395. pq_p->audit[1] |= FP_TIMEDOUT;
  2396. pq_p->audit[1] |= FP_NOTPENDING;
  2397. pq_p->audit[1] |= FP_AWAKENING;
  2398. atomic_set(&pq_p->alarmrung, 1);
  2399. wake_up(&pq_p->waitq);
  2400. }
  2401. /**
  2402. * If pending count is zero, items left on the request queue may
  2403. * never be processed.
  2404. */
  2405. if (pendingq_count <= 0) {
  2406. list_for_each_safe(lptr, tptr, &request_list) {
  2407. pq_p = list_entry(lptr, struct work_element, liste);
  2408. if (pq_p->requestsent >= timelimit)
  2409. break;
  2410. PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2411. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2412. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2413. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2414. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2415. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2416. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2417. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2418. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2419. pq_p->retcode = -ETIMEOUT;
  2420. pq_p->status[0] |= STAT_FAILED;
  2421. list_del_init(lptr);
  2422. requestq_count--;
  2423. pq_p->audit[1] |= FP_TIMEDOUT;
  2424. pq_p->audit[1] |= FP_REMREQUEST;
  2425. pq_p->audit[1] |= FP_AWAKENING;
  2426. atomic_set(&pq_p->alarmrung, 1);
  2427. wake_up(&pq_p->waitq);
  2428. }
  2429. }
  2430. }
  2431. static void
  2432. z90crypt_cleanup_task(unsigned long ptr)
  2433. {
  2434. PDEBUG("jiffies %ld\n", jiffies);
  2435. spin_lock_irq(&queuespinlock);
  2436. if (z90crypt.mask.st_count <= 0) // no devices!
  2437. helper_drain_queues();
  2438. else
  2439. helper_timeout_requests();
  2440. spin_unlock_irq(&queuespinlock);
  2441. z90crypt_schedule_cleanup_task();
  2442. }
  2443. static void
  2444. z90crypt_schedule_reader_task(unsigned long ptr)
  2445. {
  2446. tasklet_schedule(&reader_tasklet);
  2447. }
  2448. /**
  2449. * Lowlevel Functions:
  2450. *
  2451. * create_z90crypt: creates and initializes basic data structures
  2452. * refresh_z90crypt: re-initializes basic data structures
  2453. * find_crypto_devices: returns a count and mask of hardware status
  2454. * create_crypto_device: builds the descriptor for a device
 * destroy_crypto_device: deallocates the descriptor for a device
 * destroy_z90crypt: drains all work, deallocates structs
  2457. */
  2458. /**
  2459. * build the z90crypt root structure using the given domain index
  2460. */
  2461. static int
  2462. create_z90crypt(int *cdx_p)
  2463. {
  2464. struct hdware_block *hdware_blk_p;
  2465. memset(&z90crypt, 0x00, sizeof(struct z90crypt));
  2466. z90crypt.domain_established = 0;
  2467. z90crypt.len = sizeof(struct z90crypt);
  2468. z90crypt.max_count = Z90CRYPT_NUM_DEVS;
  2469. z90crypt.cdx = *cdx_p;
  2470. hdware_blk_p = (struct hdware_block *)
  2471. kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
  2472. if (!hdware_blk_p) {
  2473. PDEBUG("kmalloc for hardware block failed\n");
  2474. return ENOMEM;
  2475. }
  2476. memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
  2477. z90crypt.hdware_info = hdware_blk_p;
  2478. return 0;
  2479. }
  2480. static inline int
  2481. helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
  2482. {
  2483. enum hdstat hd_stat;
  2484. int q_depth, dev_type;
  2485. int indx, chkdom, numdomains;
  2486. q_depth = dev_type = numdomains = 0;
  2487. for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
  2488. for (indx = 0; indx < z90crypt.max_count; indx++) {
  2489. hd_stat = HD_NOT_THERE;
  2490. numdomains = 0;
  2491. for (chkdom = 0; chkdom <= 15; chkdom++) {
  2492. hd_stat = query_online(indx, chkdom, MAX_RESET,
  2493. &q_depth, &dev_type);
  2494. if (hd_stat == HD_TSQ_EXCEPTION) {
  2495. z90crypt.terminating = 1;
  2496. PRINTKC("exception taken!\n");
  2497. break;
  2498. }
  2499. if (hd_stat == HD_ONLINE) {
  2500. cdx_array[numdomains++] = chkdom;
  2501. if (*cdx_p == chkdom) {
  2502. *correct_cdx_found = 1;
  2503. break;
  2504. }
  2505. }
  2506. }
  2507. if ((*correct_cdx_found == 1) || (numdomains != 0))
  2508. break;
  2509. if (z90crypt.terminating)
  2510. break;
  2511. }
  2512. return numdomains;
  2513. }
  2514. static inline int
  2515. probe_crypto_domain(int *cdx_p)
  2516. {
  2517. int cdx_array[16];
  2518. char cdx_array_text[53], temp[5];
  2519. int correct_cdx_found, numdomains;
  2520. correct_cdx_found = 0;
  2521. numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
  2522. if (z90crypt.terminating)
  2523. return TSQ_FATAL_ERROR;
  2524. if (correct_cdx_found)
  2525. return 0;
  2526. if (numdomains == 0) {
  2527. PRINTKW("Unable to find crypto domain: No devices found\n");
  2528. return Z90C_NO_DEVICES;
  2529. }
  2530. if (numdomains == 1) {
  2531. if (*cdx_p == -1) {
  2532. *cdx_p = cdx_array[0];
  2533. return 0;
  2534. }
  2535. PRINTKW("incorrect domain: specified = %d, found = %d\n",
  2536. *cdx_p, cdx_array[0]);
  2537. return Z90C_INCORRECT_DOMAIN;
  2538. }
  2539. numdomains--;
  2540. sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
  2541. while (numdomains) {
  2542. numdomains--;
  2543. sprintf(temp, ", %d", cdx_array[numdomains]);
  2544. strcat(cdx_array_text, temp);
  2545. }
  2546. PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
  2547. *cdx_p, cdx_array_text);
  2548. return Z90C_AMBIGUOUS_DOMAIN;
  2549. }
  2550. static int
  2551. refresh_z90crypt(int *cdx_p)
  2552. {
  2553. int i, j, indx, rv;
  2554. static struct status local_mask;
  2555. struct device *devPtr;
  2556. unsigned char oldStat, newStat;
  2557. int return_unchanged;
  2558. if (z90crypt.len != sizeof(z90crypt))
  2559. return ENOTINIT;
  2560. if (z90crypt.terminating)
  2561. return TSQ_FATAL_ERROR;
  2562. rv = 0;
  2563. if (!z90crypt.hdware_info->hdware_mask.st_count &&
  2564. !z90crypt.domain_established) {
  2565. rv = probe_crypto_domain(cdx_p);
  2566. if (z90crypt.terminating)
  2567. return TSQ_FATAL_ERROR;
  2568. if (rv == Z90C_NO_DEVICES)
  2569. return 0; // try later
  2570. if (rv)
  2571. return rv;
  2572. z90crypt.cdx = *cdx_p;
  2573. z90crypt.domain_established = 1;
  2574. }
  2575. rv = find_crypto_devices(&local_mask);
  2576. if (rv) {
  2577. PRINTK("find crypto devices returned %d\n", rv);
  2578. return rv;
  2579. }
  2580. if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
  2581. sizeof(struct status))) {
  2582. return_unchanged = 1;
  2583. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
			/**
			 * Check for disabled cards. If any device is marked
			 * disabled, flag it as not there so that it will be
			 * destroyed below.
			 */
  2588. for (j = 0;
  2589. j < z90crypt.hdware_info->type_mask[i].st_count;
  2590. j++) {
  2591. indx = z90crypt.hdware_info->type_x_addr[i].
  2592. device_index[j];
  2593. devPtr = z90crypt.device_p[indx];
  2594. if (devPtr && devPtr->disabled) {
  2595. local_mask.st_mask[indx] = HD_NOT_THERE;
  2596. return_unchanged = 0;
  2597. }
  2598. }
  2599. }
  2600. if (return_unchanged == 1)
  2601. return 0;
  2602. }
  2603. spin_lock_irq(&queuespinlock);
  2604. for (i = 0; i < z90crypt.max_count; i++) {
  2605. oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
  2606. newStat = local_mask.st_mask[i];
  2607. if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
  2608. destroy_crypto_device(i);
  2609. else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
  2610. rv = create_crypto_device(i);
  2611. if (rv >= REC_FATAL_ERROR)
  2612. return rv;
  2613. if (rv != 0) {
  2614. local_mask.st_mask[i] = HD_NOT_THERE;
  2615. local_mask.st_count--;
  2616. }
  2617. }
  2618. }
  2619. memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
  2620. sizeof(local_mask.st_mask));
  2621. z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
  2622. z90crypt.hdware_info->hdware_mask.disabled_count =
  2623. local_mask.disabled_count;
  2624. refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
  2625. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
  2626. refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
  2627. &(z90crypt.hdware_info->type_x_addr[i]));
  2628. spin_unlock_irq(&queuespinlock);
  2629. return rv;
  2630. }
  2631. static int
  2632. find_crypto_devices(struct status *deviceMask)
  2633. {
  2634. int i, q_depth, dev_type;
  2635. enum hdstat hd_stat;
  2636. deviceMask->st_count = 0;
  2637. deviceMask->disabled_count = 0;
  2638. deviceMask->user_disabled_count = 0;
  2639. for (i = 0; i < z90crypt.max_count; i++) {
  2640. hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
  2641. &dev_type);
  2642. if (hd_stat == HD_TSQ_EXCEPTION) {
  2643. z90crypt.terminating = 1;
  2644. PRINTKC("Exception during probe for crypto devices\n");
  2645. return TSQ_FATAL_ERROR;
  2646. }
  2647. deviceMask->st_mask[i] = hd_stat;
  2648. if (hd_stat == HD_ONLINE) {
  2649. PDEBUG("Got an online crypto!: %d\n", i);
  2650. PDEBUG("Got a queue depth of %d\n", q_depth);
  2651. PDEBUG("Got a device type of %d\n", dev_type);
  2652. if (q_depth <= 0)
  2653. return TSQ_FATAL_ERROR;
  2654. deviceMask->st_count++;
  2655. z90crypt.q_depth_array[i] = q_depth;
  2656. z90crypt.dev_type_array[i] = dev_type;
  2657. }
  2658. }
  2659. return 0;
  2660. }
  2661. static int
  2662. refresh_index_array(struct status *status_str, struct device_x *index_array)
  2663. {
  2664. int i, count;
  2665. enum devstat stat;
  2666. i = -1;
  2667. count = 0;
  2668. do {
  2669. stat = status_str->st_mask[++i];
  2670. if (stat == DEV_ONLINE)
  2671. index_array->device_index[count++] = i;
  2672. } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
  2673. return count;
  2674. }
  2675. static int
  2676. create_crypto_device(int index)
  2677. {
  2678. int rv, devstat, total_size;
  2679. struct device *dev_ptr;
  2680. struct status *type_str_p;
  2681. int deviceType;
  2682. dev_ptr = z90crypt.device_p[index];
  2683. if (!dev_ptr) {
  2684. total_size = sizeof(struct device) +
  2685. z90crypt.q_depth_array[index] * sizeof(int);
  2686. dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
  2687. if (!dev_ptr) {
  2688. PRINTK("kmalloc device %d failed\n", index);
  2689. return ENOMEM;
  2690. }
  2691. memset(dev_ptr, 0, total_size);
  2692. dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
  2693. if (!dev_ptr->dev_resp_p) {
  2694. kfree(dev_ptr);
  2695. PRINTK("kmalloc device %d rec buffer failed\n", index);
  2696. return ENOMEM;
  2697. }
  2698. dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
  2699. INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
  2700. }
  2701. devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
  2702. if (devstat == DEV_RSQ_EXCEPTION) {
  2703. PRINTK("exception during reset device %d\n", index);
  2704. kfree(dev_ptr->dev_resp_p);
  2705. kfree(dev_ptr);
  2706. return RSQ_FATAL_ERROR;
  2707. }
  2708. if (devstat == DEV_ONLINE) {
  2709. dev_ptr->dev_self_x = index;
  2710. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2711. if (dev_ptr->dev_type == NILDEV) {
  2712. rv = probe_device_type(dev_ptr);
  2713. if (rv) {
  2714. PRINTK("rv = %d from probe_device_type %d\n",
  2715. rv, index);
  2716. kfree(dev_ptr->dev_resp_p);
  2717. kfree(dev_ptr);
  2718. return rv;
  2719. }
  2720. }
  2721. if (dev_ptr->dev_type == PCIXCC_UNK) {
  2722. rv = probe_PCIXCC_type(dev_ptr);
  2723. if (rv) {
  2724. PRINTK("rv = %d from probe_PCIXCC_type %d\n",
  2725. rv, index);
  2726. kfree(dev_ptr->dev_resp_p);
  2727. kfree(dev_ptr);
  2728. return rv;
  2729. }
  2730. }
  2731. deviceType = dev_ptr->dev_type;
  2732. z90crypt.dev_type_array[index] = deviceType;
  2733. if (deviceType == PCICA)
  2734. z90crypt.hdware_info->device_type_array[index] = 1;
  2735. else if (deviceType == PCICC)
  2736. z90crypt.hdware_info->device_type_array[index] = 2;
  2737. else if (deviceType == PCIXCC_MCL2)
  2738. z90crypt.hdware_info->device_type_array[index] = 3;
  2739. else if (deviceType == PCIXCC_MCL3)
  2740. z90crypt.hdware_info->device_type_array[index] = 4;
  2741. else if (deviceType == CEX2C)
  2742. z90crypt.hdware_info->device_type_array[index] = 5;
  2743. else if (deviceType == CEX2A)
  2744. z90crypt.hdware_info->device_type_array[index] = 6;
  2745. else // No idea how this would happen.
  2746. z90crypt.hdware_info->device_type_array[index] = -1;
  2747. }
  2748. /**
  2749. * 'q_depth' returned by the hardware is one less than
  2750. * the actual depth
  2751. */
  2752. dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
  2753. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2754. dev_ptr->dev_stat = devstat;
  2755. dev_ptr->disabled = 0;
  2756. z90crypt.device_p[index] = dev_ptr;
  2757. if (devstat == DEV_ONLINE) {
  2758. if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
  2759. z90crypt.mask.st_mask[index] = DEV_ONLINE;
  2760. z90crypt.mask.st_count++;
  2761. }
  2762. deviceType = dev_ptr->dev_type;
  2763. type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
  2764. if (type_str_p->st_mask[index] != DEV_ONLINE) {
  2765. type_str_p->st_mask[index] = DEV_ONLINE;
  2766. type_str_p->st_count++;
  2767. }
  2768. }
  2769. return 0;
  2770. }
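/*
 * Note: the 1..6 codes written to device_type_array above mirror the legend
 * that z90crypt_status() prints ("1=PCICA 2=PCICC 3=PCIXCC(MCL2)
 * 4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A"), which keeps the two views consistent.
 */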
  2771. static int
  2772. destroy_crypto_device(int index)
  2773. {
  2774. struct device *dev_ptr;
  2775. int t, disabledFlag;
  2776. dev_ptr = z90crypt.device_p[index];
  2777. /* remember device type; get rid of device struct */
  2778. if (dev_ptr) {
  2779. disabledFlag = dev_ptr->disabled;
  2780. t = dev_ptr->dev_type;
  2781. kfree(dev_ptr->dev_resp_p);
  2782. kfree(dev_ptr);
  2783. } else {
  2784. disabledFlag = 0;
  2785. t = -1;
  2786. }
  2787. z90crypt.device_p[index] = 0;
  2788. /* if the type is valid, remove the device from the type_mask */
  2789. if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
  2790. z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
  2791. z90crypt.hdware_info->type_mask[t].st_count--;
  2792. if (disabledFlag == 1)
  2793. z90crypt.hdware_info->type_mask[t].disabled_count--;
  2794. }
  2795. if (z90crypt.mask.st_mask[index] != DEV_GONE) {
  2796. z90crypt.mask.st_mask[index] = DEV_GONE;
  2797. z90crypt.mask.st_count--;
  2798. }
  2799. z90crypt.hdware_info->device_type_array[index] = 0;
  2800. return 0;
  2801. }
  2802. static void
  2803. destroy_z90crypt(void)
  2804. {
  2805. int i;
  2806. for (i = 0; i < z90crypt.max_count; i++)
  2807. if (z90crypt.device_p[i])
  2808. destroy_crypto_device(i);
  2809. kfree(z90crypt.hdware_info);
  2810. memset((void *)&z90crypt, 0, sizeof(z90crypt));
  2811. }
  2812. static unsigned char static_testmsg[384] = {
  2813. 0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
  2814. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
  2815. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
  2816. 0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
  2817. 0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2818. 0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2819. 0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
  2820. 0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2821. 0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2822. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2823. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2824. 0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
  2825. 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
  2826. 0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
  2827. 0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
  2828. 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
  2829. 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
  2830. 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
  2831. 0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
  2832. 0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
  2833. 0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
  2834. 0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
  2835. 0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
  2836. 0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
  2837. };
static int
probe_device_type(struct device *devPtr)
{
        int rv, dv, i, index, length;
        unsigned char psmid[8];
        static unsigned char loc_testmsg[sizeof(static_testmsg)];

        index = devPtr->dev_self_x;
        rv = 0;
        do {
                memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
                /* the -24 allows for the header */
                length = sizeof(static_testmsg) - 24;
                dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
                if (dv) {
                        PDEBUG("dv returned by send during probe: %d\n", dv);
                        if (dv == DEV_SEN_EXCEPTION) {
                                rv = SEN_FATAL_ERROR;
                                PRINTKC("exception in send to AP %d\n", index);
                                break;
                        }
                        PDEBUG("return value from send_to_AP: %d\n", rv);
                        switch (dv) {
                        case DEV_GONE:
                                PDEBUG("dev %d not available\n", index);
                                rv = SEN_NOT_AVAIL;
                                break;
                        case DEV_ONLINE:
                                rv = 0;
                                break;
                        case DEV_EMPTY:
                                rv = SEN_NOT_AVAIL;
                                break;
                        case DEV_NO_WORK:
                                rv = SEN_FATAL_ERROR;
                                break;
                        case DEV_BAD_MESSAGE:
                                rv = SEN_USER_ERROR;
                                break;
                        case DEV_QUEUE_FULL:
                                rv = SEN_QUEUE_FULL;
                                break;
                        default:
                                PRINTK("unknown dv=%d for dev %d\n", dv, index);
                                rv = SEN_NOT_AVAIL;
                                break;
                        }
                }
                if (rv)
                        break;
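
                /*
                 * Poll for the reply: up to six dequeue attempts, 300 ms
                 * apart. REC_NO_WORK means "nothing yet, keep waiting";
                 * a successful dequeue or any other nonzero code ends the
                 * loop.
                 */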
                for (i = 0; i < 6; i++) {
                        mdelay(300);
                        dv = receive_from_AP(index, z90crypt.cdx,
                                             devPtr->dev_resp_l,
                                             devPtr->dev_resp_p, psmid);
                        PDEBUG("dv returned by DQ = %d\n", dv);
                        if (dv == DEV_REC_EXCEPTION) {
                                rv = REC_FATAL_ERROR;
                                PRINTKC("exception in dequeue %d\n",
                                        index);
                                break;
                        }
                        switch (dv) {
                        case DEV_ONLINE:
                                rv = 0;
                                break;
                        case DEV_EMPTY:
                                rv = REC_EMPTY;
                                break;
                        case DEV_NO_WORK:
                                rv = REC_NO_WORK;
                                break;
                        case DEV_BAD_MESSAGE:
                        case DEV_GONE:
                        default:
                                rv = REC_NO_RESPONSE;
                                break;
                        }
                        if ((rv != 0) && (rv != REC_NO_WORK))
                                break;
                        if (rv == 0)
                                break;
                }
                if (rv)
                        break;
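
                /*
                 * Classify the card from the first two bytes of the reply:
                 * 0x00 0x86 identifies a PCICC, anything else is treated
                 * as a PCICA.
                 */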
                rv = (devPtr->dev_resp_p[0] == 0x00) &&
                     (devPtr->dev_resp_p[1] == 0x86);
                if (rv)
                        devPtr->dev_type = PCICC;
                else
                        devPtr->dev_type = PCICA;
                rv = 0;
        } while (0);
        /* In a general error case, the card is not marked online */
        return rv;
}
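
/*
 * Canned test request used by probe_PCIXCC_type below; the return and
 * reason codes in the reply to this message are what distinguish an
 * MCL2-level PCIXCC card from an MCL3-level one.
 */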
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
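
/*
 * probe_PCIXCC_type: same send/poll pattern as probe_device_type, but with
 * the MCL3 test message. The CPRBX embedded in the reply decides the type:
 * return code 8 with reason code 33 marks the card as PCIXCC_MCL2,
 * everything else as PCIXCC_MCL3.
 */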
static int
probe_PCIXCC_type(struct device *devPtr)
{
        int rv, dv, i, index, length;
        unsigned char psmid[8];
        static unsigned char loc_testmsg[548];
        struct CPRBX *cprbx_p;

        index = devPtr->dev_self_x;
        rv = 0;
        do {
                memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
                length = sizeof(MCL3_testmsg) - 0x0C;
                dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
                if (dv) {
                        PDEBUG("dv returned = %d\n", dv);
                        if (dv == DEV_SEN_EXCEPTION) {
                                rv = SEN_FATAL_ERROR;
                                PRINTKC("exception in send to AP %d\n", index);
                                break;
                        }
                        PDEBUG("return value from send_to_AP: %d\n", rv);
                        switch (dv) {
                        case DEV_GONE:
                                PDEBUG("dev %d not available\n", index);
                                rv = SEN_NOT_AVAIL;
                                break;
                        case DEV_ONLINE:
                                rv = 0;
                                break;
                        case DEV_EMPTY:
                                rv = SEN_NOT_AVAIL;
                                break;
                        case DEV_NO_WORK:
                                rv = SEN_FATAL_ERROR;
                                break;
                        case DEV_BAD_MESSAGE:
                                rv = SEN_USER_ERROR;
                                break;
                        case DEV_QUEUE_FULL:
                                rv = SEN_QUEUE_FULL;
                                break;
                        default:
                                PRINTK("unknown dv=%d for dev %d\n", dv, index);
                                rv = SEN_NOT_AVAIL;
                                break;
                        }
                }
                if (rv)
                        break;
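
                /*
                 * Poll for the reply exactly as probe_device_type does:
                 * up to six dequeue attempts at 300 ms intervals, retrying
                 * only while REC_NO_WORK is reported.
                 */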
                for (i = 0; i < 6; i++) {
                        mdelay(300);
                        dv = receive_from_AP(index, z90crypt.cdx,
                                             devPtr->dev_resp_l,
                                             devPtr->dev_resp_p, psmid);
                        PDEBUG("dv returned by DQ = %d\n", dv);
                        if (dv == DEV_REC_EXCEPTION) {
                                rv = REC_FATAL_ERROR;
                                PRINTKC("exception in dequeue %d\n",
                                        index);
                                break;
                        }
                        switch (dv) {
                        case DEV_ONLINE:
                                rv = 0;
                                break;
                        case DEV_EMPTY:
                                rv = REC_EMPTY;
                                break;
                        case DEV_NO_WORK:
                                rv = REC_NO_WORK;
                                break;
                        case DEV_BAD_MESSAGE:
                        case DEV_GONE:
                        default:
                                rv = REC_NO_RESPONSE;
                                break;
                        }
                        if ((rv != 0) && (rv != REC_NO_WORK))
                                break;
                        if (rv == 0)
                                break;
                }
                if (rv)
                        break;
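
                /*
                 * The CPRBX sits 48 bytes into the response buffer; an
                 * rtcode/rscode pair of 8/33 identifies an MCL2 card,
                 * anything else is taken to be MCL3.
                 */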
                cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
                if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
                        devPtr->dev_type = PCIXCC_MCL2;
                        PDEBUG("device %d is MCL2\n", index);
                } else {
                        devPtr->dev_type = PCIXCC_MCL3;
                        PDEBUG("device %d is MCL3\n", index);
                }
        } while (0);
        /* In a general error case, the card is not marked online */
        return rv;
}

module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);