  1. /*
  2. * linux/drivers/s390/crypto/z90main.c
  3. *
  4. * z90crypt 1.3.2
  5. *
  6. * Copyright (C) 2001, 2004 IBM Corporation
  7. * Author(s): Robert Burroughs (burrough@us.ibm.com)
  8. * Eric Rossman (edrossma@us.ibm.com)
  9. *
  10. * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2, or (at your option)
  15. * any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  25. */
  26. #include <asm/uaccess.h> // copy_(from|to)_user
  27. #include <linux/compat.h>
  28. #include <linux/compiler.h>
  29. #include <linux/delay.h> // mdelay
  30. #include <linux/init.h>
  31. #include <linux/interrupt.h> // for tasklets
  32. #include <linux/ioctl32.h>
  33. #include <linux/module.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/kobject_uevent.h>
  36. #include <linux/proc_fs.h>
  37. #include <linux/syscalls.h>
  38. #include <linux/version.h>
  39. #include "z90crypt.h"
  40. #include "z90common.h"
  41. #ifndef Z90CRYPT_USE_HOTPLUG
  42. #include <linux/miscdevice.h>
  43. #endif
  44. #define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
  45. #if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
  46. # error "This kernel is too old: not supported"
  47. #endif
  48. #if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
  49. # error "This kernel is too recent: not supported by this file"
  50. #endif
  51. #define VERSION_Z90MAIN_C "$Revision: 1.57 $"
  52. static char z90main_version[] __initdata =
  53. "z90main.o (" VERSION_Z90MAIN_C "/"
  54. VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
  55. extern char z90hardware_version[];
  56. /**
  57. * Defaults that may be modified.
  58. */
  59. #ifndef Z90CRYPT_USE_HOTPLUG
  60. /**
  61. * You can specify a different minor at compile time.
  62. */
  63. #ifndef Z90CRYPT_MINOR
  64. #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
  65. #endif
  66. #else
  67. /**
  68. * You can specify a different major at compile time.
  69. */
  70. #ifndef Z90CRYPT_MAJOR
  71. #define Z90CRYPT_MAJOR 0
  72. #endif
  73. #endif
  74. /**
  75. * You can specify a different domain at compile time or on the insmod
  76. * command line.
  77. */
  78. #ifndef DOMAIN_INDEX
  79. #define DOMAIN_INDEX -1
  80. #endif
  81. /**
  82. * This is the name under which the device is registered in /proc/modules.
  83. */
  84. #define REG_NAME "z90crypt"
  85. /**
  86. * Cleanup should run every CLEANUPTIME seconds and should clean up requests
  87. * older than CLEANUPTIME seconds in the past.
  88. */
  89. #ifndef CLEANUPTIME
  90. #define CLEANUPTIME 20
  91. #endif
  92. /**
  93. * Config should run every CONFIGTIME seconds
  94. */
  95. #ifndef CONFIGTIME
  96. #define CONFIGTIME 30
  97. #endif
  98. /**
  99. * The first execution of the config task should take place
  100. * immediately after initialization
  101. */
  102. #ifndef INITIAL_CONFIGTIME
  103. #define INITIAL_CONFIGTIME 1
  104. #endif
  105. /**
  106. * Reader should run every READERTIME milliseconds
  107. * With the 100Hz patch for s390, z90crypt can lock the system solid while
  108. * under heavy load. We'll try to avoid that.
  109. */
  110. #ifndef READERTIME
  111. #if HZ > 1000
  112. #define READERTIME 2
  113. #else
  114. #define READERTIME 10
  115. #endif
  116. #endif
  117. /**
  118. * turn long device array index into device pointer
  119. */
  120. #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
  121. /**
  122. * turn short device array index into long device array index
  123. */
  124. #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
  125. /**
  126. * turn short device array index into device pointer
  127. */
  128. #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
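/**
 * Illustrative only: the status helpers later in this file walk the
 * enabled devices with exactly this pattern (short index -> AP number
 * -> device pointer):
 *
 *	for (i = 0; i < get_status_totalcount(); i++) {
 *		ix = SHRT2LONG(i);
 *		dev_ptr = LONG2DEVPTR(ix);
 *	}
 */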
  129. /**
  130. * Status for a work-element
  131. */
  132. #define STAT_DEFAULT 0x00 // request has not been processed
  133. #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
  134. // else, device is determined each write
  135. #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
  136. // before being sent to the hardware.
  137. #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
  138. // 0x20 // UNUSED state
  139. #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
  140. #define STAT_NOWORK 0x00 // bits off: no work on any queue
  141. #define STAT_RDWRMASK 0x30 // mask for bits 5-4
  142. /**
  143. * Macros to check the status RDWRMASK
  144. */
  145. #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
  146. #define SET_RDWRMASK(statbyte, newval) \
  147. {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
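/**
 * Usage sketch, mirroring z90crypt_send and z90crypt_process_results
 * further below:
 *
 *	if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK)
 *		return -EWORKPEND;                     // work outstanding
 *	SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);   // queued or sent
 *	...
 *	SET_RDWRMASK(we_p->status[0], STAT_READPEND);  // returning response
 *	SET_RDWRMASK(we_p->status[0], STAT_NOWORK);    // element idle again
 */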
  148. /**
  149. * Audit Trail. Progress of a Work element
  150. * audit[0]: Unless noted otherwise, these bits are all set by the process
  151. */
  152. #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
  153. #define FP_BUFFREQ 0x40 // Low Level buffer requested
  154. #define FP_BUFFGOT 0x20 // Low Level buffer obtained
  155. #define FP_SENT 0x10 // Work element sent to a crypto device
  156. // (may be set by process or by reader task)
  157. #define FP_PENDING 0x08 // Work element placed on pending queue
  158. // (may be set by process or by reader task)
  159. #define FP_REQUEST 0x04 // Work element placed on request queue
  160. #define FP_ASLEEP 0x02 // Work element about to sleep
  161. #define FP_AWAKE 0x01 // Work element has been awakened
  162. /**
  163. * audit[1]: These bits are set by the reader task and/or the cleanup task
  164. */
  165. #define FP_NOTPENDING 0x80 // Work element removed from pending queue
  166. #define FP_AWAKENING 0x40 // Caller about to be awakened
  167. #define FP_TIMEDOUT 0x20 // Caller timed out
  168. #define FP_RESPSIZESET 0x10 // Response size copied to work element
  169. #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
  170. #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
  171. #define FP_REMREQUEST 0x02 // Work element removed from request queue
  172. #define FP_SIGNALED 0x01 // Work element was awakened by a signal
  173. /**
  174. * audit[2]: unused
  175. */
  176. /**
  177. * state of the file handle in private_data.status
  178. */
  179. #define STAT_OPEN 0
  180. #define STAT_CLOSED 1
  181. /**
  182. * PID() expands to the process ID of the current process
  183. */
  184. #define PID() (current->pid)
  185. /**
  186. * Selected Constants. The number of APs and the number of devices
  187. */
  188. #ifndef Z90CRYPT_NUM_APS
  189. #define Z90CRYPT_NUM_APS 64
  190. #endif
  191. #ifndef Z90CRYPT_NUM_DEVS
  192. #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
  193. #endif
  194. /**
  195. * Buffer size for receiving responses. The maximum Response Size
  196. * is actually the maximum request size, since in an error condition
  197. * the request itself may be returned unchanged.
  198. */
  199. #define MAX_RESPONSE_SIZE 0x0000077C
  200. /**
  201. * A count and status-byte mask
  202. */
  203. struct status {
  204. int st_count; // # of enabled devices
  205. int disabled_count; // # of disabled devices
  206. int user_disabled_count; // # of devices disabled via proc fs
  207. unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
  208. };
  209. /**
  210. * The array of device indexes is a mechanism for fast indexing into
  211. * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
  212. * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
  213. * z90CDeviceIndex[2] is 47.
  214. */
  215. struct device_x {
  216. int device_index[Z90CRYPT_NUM_DEVS];
  217. };
  218. /**
  219. * All devices are arranged in a single array: 64 APs
  220. */
  221. struct device {
  222. int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
  223. // PCIXCC_MCL3, CEX2C
  224. enum devstat dev_stat; // current device status
  225. int dev_self_x; // Index in array
  226. int disabled; // Set when device is in error
  227. int user_disabled; // Set when device is disabled by user
  228. int dev_q_depth; // q depth
  229. unsigned char * dev_resp_p; // Response buffer address
  230. int dev_resp_l; // Response Buffer length
  231. int dev_caller_count; // Number of callers
  232. int dev_total_req_cnt; // # requests for device since load
  233. struct list_head dev_caller_list; // List of callers
  234. };
  235. /**
  236. * There's a struct status and a struct device_x for each device type.
  237. */
  238. struct hdware_block {
  239. struct status hdware_mask;
  240. struct status type_mask[Z90CRYPT_NUM_TYPES];
  241. struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
  242. unsigned char device_type_array[Z90CRYPT_NUM_APS];
  243. };
  244. /**
  245. * z90crypt is the topmost data structure in the hierarchy.
  246. */
  247. struct z90crypt {
  248. int max_count; // Nr of possible crypto devices
  249. struct status mask;
  250. int q_depth_array[Z90CRYPT_NUM_DEVS];
  251. int dev_type_array[Z90CRYPT_NUM_DEVS];
  252. struct device_x overall_device_x; // array device indexes
  253. struct device * device_p[Z90CRYPT_NUM_DEVS];
  254. int terminating;
  255. int domain_established;// TRUE: domain has been found
  256. int cdx; // Crypto Domain Index
  257. int len; // Length of this data structure
  258. struct hdware_block *hdware_info;
  259. };
  260. /**
  261. * An array of these structures is pointed to from dev_caller
  262. * The length of the array depends on the device type. For APs,
  263. * there are 8.
  264. *
  265. * The caller buffer is allocated to the user at OPEN. At WRITE,
  266. * it contains the request; at READ, the response. The function
  267. * send_to_crypto_device converts the request to device-dependent
  268. * form and uses the caller's OPEN-allocated buffer for the response.
  269. */
  270. struct caller {
  271. int caller_buf_l; // length of original request
  272. unsigned char * caller_buf_p; // Original request on WRITE
  273. int caller_dev_dep_req_l; // len device dependent request
  274. unsigned char * caller_dev_dep_req_p; // Device dependent form
  275. unsigned char caller_id[8]; // caller-supplied message id
  276. struct list_head caller_liste;
  277. unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
  278. };
  279. /**
  280. * Function prototypes from z90hardware.c
  281. */
  282. enum hdstat query_online(int, int, int, int *, int *);
  283. enum devstat reset_device(int, int, int);
  284. enum devstat send_to_AP(int, int, int, unsigned char *);
  285. enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
  286. int convert_request(unsigned char *, int, short, int, int, int *,
  287. unsigned char *);
  288. int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
  289. /**
  290. * Low level function prototypes
  291. */
  292. static int create_z90crypt(int *);
  293. static int refresh_z90crypt(int *);
  294. static int find_crypto_devices(struct status *);
  295. static int create_crypto_device(int);
  296. static int destroy_crypto_device(int);
  297. static void destroy_z90crypt(void);
  298. static int refresh_index_array(struct status *, struct device_x *);
  299. static int probe_device_type(struct device *);
  300. static int probe_PCIXCC_type(struct device *);
  301. /**
  302. * proc fs definitions
  303. */
  304. static struct proc_dir_entry *z90crypt_entry;
  305. /**
  306. * data structures
  307. */
  308. /**
  309. * work_element.opener points back to this structure
  310. */
  311. struct priv_data {
  312. pid_t opener_pid;
  313. unsigned char status; // 0: open 1: closed
  314. };
  315. /**
  316. * A work element is allocated for each request
  317. */
  318. struct work_element {
  319. struct priv_data *priv_data;
  320. pid_t pid;
  321. int devindex; // index of device processing this w_e
  322. // (If request did not specify device,
  323. // -1 until placed onto a queue)
  324. int devtype;
  325. struct list_head liste; // used for requestq and pendingq
  326. char buffer[128]; // local copy of user request
  327. int buff_size; // size of the buffer for the request
  328. char resp_buff[RESPBUFFSIZE];
  329. int resp_buff_size;
  330. char __user * resp_addr; // address of response in user space
  331. unsigned int funccode; // function code of request
  332. wait_queue_head_t waitq;
  333. unsigned long requestsent; // time at which the request was sent
  334. atomic_t alarmrung; // wake-up signal
  335. unsigned char caller_id[8]; // pid + counter, for this w_e
  336. unsigned char status[1]; // bits to mark status of the request
  337. unsigned char audit[3]; // record of work element's progress
  338. unsigned char * requestptr; // address of request buffer
  339. int retcode; // return code of request
  340. };
  341. /**
  342. * High level function prototypes
  343. */
  344. static int z90crypt_open(struct inode *, struct file *);
  345. static int z90crypt_release(struct inode *, struct file *);
  346. static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
  347. static ssize_t z90crypt_write(struct file *, const char __user *,
  348. size_t, loff_t *);
  349. static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
  350. static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
  351. static void z90crypt_reader_task(unsigned long);
  352. static void z90crypt_schedule_reader_task(unsigned long);
  353. static void z90crypt_config_task(unsigned long);
  354. static void z90crypt_cleanup_task(unsigned long);
  355. static int z90crypt_status(char *, char **, off_t, int, int *, void *);
  356. static int z90crypt_status_write(struct file *, const char __user *,
  357. unsigned long, void *);
  358. /**
  359. * Hotplug support
  360. */
  361. #ifdef Z90CRYPT_USE_HOTPLUG
  362. #define Z90CRYPT_HOTPLUG_ADD 1
  363. #define Z90CRYPT_HOTPLUG_REMOVE 2
  364. static void z90crypt_hotplug_event(int, int, int);
  365. #endif
  366. /**
  367. * Storage allocated at initialization and used throughout the life of
  368. * this insmod
  369. */
  370. #ifdef Z90CRYPT_USE_HOTPLUG
  371. static int z90crypt_major = Z90CRYPT_MAJOR;
  372. #endif
  373. static int domain = DOMAIN_INDEX;
  374. static struct z90crypt z90crypt;
  375. static int quiesce_z90crypt;
  376. static spinlock_t queuespinlock;
  377. static struct list_head request_list;
  378. static int requestq_count;
  379. static struct list_head pending_list;
  380. static int pendingq_count;
  381. static struct tasklet_struct reader_tasklet;
  382. static struct timer_list reader_timer;
  383. static struct timer_list config_timer;
  384. static struct timer_list cleanup_timer;
  385. static atomic_t total_open;
  386. static atomic_t z90crypt_step;
  387. static struct file_operations z90crypt_fops = {
  388. .owner = THIS_MODULE,
  389. .read = z90crypt_read,
  390. .write = z90crypt_write,
  391. .unlocked_ioctl = z90crypt_unlocked_ioctl,
  392. #ifdef CONFIG_COMPAT
  393. .compat_ioctl = z90crypt_compat_ioctl,
  394. #endif
  395. .open = z90crypt_open,
  396. .release = z90crypt_release
  397. };
  398. #ifndef Z90CRYPT_USE_HOTPLUG
  399. static struct miscdevice z90crypt_misc_device = {
  400. .minor = Z90CRYPT_MINOR,
  401. .name = DEV_NAME,
  402. .fops = &z90crypt_fops,
  403. .devfs_name = DEV_NAME
  404. };
  405. #endif
  406. /**
  407. * Documentation values.
  408. */
  409. MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
  410. "and Jochen Roehrig");
  411. MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
  412. "Copyright 2001, 2004 IBM Corporation");
  413. MODULE_LICENSE("GPL");
  414. module_param(domain, int, 0);
  415. MODULE_PARM_DESC(domain, "domain index for device");
  416. #ifdef CONFIG_COMPAT
  417. /**
  418. * ioctl32 conversion routines
  419. */
  420. struct ica_rsa_modexpo_32 { // For 32-bit callers
  421. compat_uptr_t inputdata;
  422. unsigned int inputdatalength;
  423. compat_uptr_t outputdata;
  424. unsigned int outputdatalength;
  425. compat_uptr_t b_key;
  426. compat_uptr_t n_modulus;
  427. };
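/**
 * For reference, a minimal 32-bit userspace caller (hypothetical sketch;
 * the /dev/z90crypt node name is assumed) fills struct ica_rsa_modexpo
 * from z90crypt.h and issues the ioctl:
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata = in,    .inputdatalength = len,
 *		.outputdata = out,  .outputdatalength = len,
 *		.b_key = exponent,  .n_modulus = modulus,
 *	};
 *	rc = ioctl(fd, ICARSAMODEXPO, &mex);  // 0 on success; out holds result
 *
 * trans_modexpo32() below repacks this layout from the 32-bit compat
 * struct into the native one before calling z90crypt_unlocked_ioctl.
 */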
  428. static long
  429. trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
  430. {
  431. struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
  432. struct ica_rsa_modexpo_32 mex32k;
  433. struct ica_rsa_modexpo __user *mex64;
  434. long ret = 0;
  435. unsigned int i;
  436. if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  437. return -EFAULT;
  438. mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
  439. if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
  440. return -EFAULT;
  441. if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
  442. return -EFAULT;
  443. if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
  444. __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
  445. __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
  446. __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
  447. __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
  448. __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
  449. return -EFAULT;
  450. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
  451. if (!ret)
  452. if (__get_user(i, &mex64->outputdatalength) ||
  453. __put_user(i, &mex32u->outputdatalength))
  454. ret = -EFAULT;
  455. return ret;
  456. }
  457. struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
  458. compat_uptr_t inputdata;
  459. unsigned int inputdatalength;
  460. compat_uptr_t outputdata;
  461. unsigned int outputdatalength;
  462. compat_uptr_t bp_key;
  463. compat_uptr_t bq_key;
  464. compat_uptr_t np_prime;
  465. compat_uptr_t nq_prime;
  466. compat_uptr_t u_mult_inv;
  467. };
  468. static long
  469. trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
  470. {
  471. struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
  472. struct ica_rsa_modexpo_crt_32 crt32k;
  473. struct ica_rsa_modexpo_crt __user *crt64;
  474. long ret = 0;
  475. unsigned int i;
  476. if (!access_ok(VERIFY_WRITE, crt32u,
  477. sizeof(struct ica_rsa_modexpo_crt_32)))
  478. return -EFAULT;
  479. crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
  480. if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
  481. return -EFAULT;
  482. if (copy_from_user(&crt32k, crt32u,
  483. sizeof(struct ica_rsa_modexpo_crt_32)))
  484. return -EFAULT;
  485. if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
  486. __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
  487. __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
  488. __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
  489. __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
  490. __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
  491. __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
  492. __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
  493. __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
  494. return -EFAULT;
  495. ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
  496. if (!ret)
  497. if (__get_user(i, &crt64->outputdatalength) ||
  498. __put_user(i, &crt32u->outputdatalength))
  499. ret = -EFAULT;
  500. return ret;
  501. }
  502. static long
  503. z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  504. {
  505. switch (cmd) {
  506. case ICAZ90STATUS:
  507. case Z90QUIESCE:
  508. case Z90STAT_TOTALCOUNT:
  509. case Z90STAT_PCICACOUNT:
  510. case Z90STAT_PCICCCOUNT:
  511. case Z90STAT_PCIXCCCOUNT:
  512. case Z90STAT_PCIXCCMCL2COUNT:
  513. case Z90STAT_PCIXCCMCL3COUNT:
  514. case Z90STAT_CEX2CCOUNT:
  515. case Z90STAT_REQUESTQ_COUNT:
  516. case Z90STAT_PENDINGQ_COUNT:
  517. case Z90STAT_TOTALOPEN_COUNT:
  518. case Z90STAT_DOMAIN_INDEX:
  519. case Z90STAT_STATUS_MASK:
  520. case Z90STAT_QDEPTH_MASK:
  521. case Z90STAT_PERDEV_REQCNT:
  522. return z90crypt_unlocked_ioctl(filp, cmd, arg);
  523. case ICARSAMODEXPO:
  524. return trans_modexpo32(filp, cmd, arg);
  525. case ICARSACRT:
  526. return trans_modexpo_crt32(filp, cmd, arg);
  527. default:
  528. return -ENOIOCTLCMD;
  529. }
  530. }
  531. #endif
  532. /**
  533. * The module initialization code.
  534. */
  535. static int __init
  536. z90crypt_init_module(void)
  537. {
  538. int result, nresult;
  539. struct proc_dir_entry *entry;
  540. PDEBUG("PID %d\n", PID());
  541. if ((domain < -1) || (domain > 15)) {
  542. PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
  543. return -EINVAL;
  544. }
  545. #ifndef Z90CRYPT_USE_HOTPLUG
  546. /* Register as misc device with given minor (or get a dynamic one). */
  547. result = misc_register(&z90crypt_misc_device);
  548. if (result < 0) {
  549. PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
  550. z90crypt_misc_device.minor, result);
  551. return result;
  552. }
  553. #else
  554. /* Register the major (or get a dynamic one). */
  555. result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
  556. if (result < 0) {
  557. PRINTKW("register_chrdev (major %d) failed with %d.\n",
  558. z90crypt_major, result);
  559. return result;
  560. }
  561. if (z90crypt_major == 0)
  562. z90crypt_major = result;
  563. #endif
  564. PDEBUG("Registered " DEV_NAME " with result %d\n", result);
  565. result = create_z90crypt(&domain);
  566. if (result != 0) {
  567. PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
  568. domain, result);
  569. result = -ENOMEM;
  570. goto init_module_cleanup;
  571. }
  572. if (result == 0) {
  573. PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
  574. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
  575. __DATE__, __TIME__);
  576. PRINTKN("%s\n", z90main_version);
  577. PRINTKN("%s\n", z90hardware_version);
  578. PDEBUG("create_z90crypt (domain index %d) successful.\n",
  579. domain);
  580. } else
  581. PRINTK("No devices at startup\n");
  582. #ifdef Z90CRYPT_USE_HOTPLUG
  583. /* generate hotplug event for device node generation */
  584. z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
  585. #endif
  586. /* Initialize globals. */
  587. spin_lock_init(&queuespinlock);
  588. INIT_LIST_HEAD(&pending_list);
  589. pendingq_count = 0;
  590. INIT_LIST_HEAD(&request_list);
  591. requestq_count = 0;
  592. quiesce_z90crypt = 0;
  593. atomic_set(&total_open, 0);
  594. atomic_set(&z90crypt_step, 0);
  595. /* Set up the cleanup task. */
  596. init_timer(&cleanup_timer);
  597. cleanup_timer.function = z90crypt_cleanup_task;
  598. cleanup_timer.data = 0;
  599. cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
  600. add_timer(&cleanup_timer);
  601. /* Set up the proc file system */
  602. entry = create_proc_entry("driver/z90crypt", 0644, 0);
  603. if (entry) {
  604. entry->nlink = 1;
  605. entry->data = 0;
  606. entry->read_proc = z90crypt_status;
  607. entry->write_proc = z90crypt_status_write;
  608. }
  609. else
  610. PRINTK("Couldn't create z90crypt proc entry\n");
  611. z90crypt_entry = entry;
  612. /* Set up the configuration task. */
  613. init_timer(&config_timer);
  614. config_timer.function = z90crypt_config_task;
  615. config_timer.data = 0;
  616. config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
  617. add_timer(&config_timer);
  618. /* Set up the reader task */
  619. tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
  620. init_timer(&reader_timer);
  621. reader_timer.function = z90crypt_schedule_reader_task;
  622. reader_timer.data = 0;
  623. reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
  624. add_timer(&reader_timer);
  625. return 0; // success
  626. init_module_cleanup:
  627. #ifndef Z90CRYPT_USE_HOTPLUG
  628. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  629. PRINTK("misc_deregister failed with %d.\n", nresult);
  630. else
  631. PDEBUG("misc_deregister successful.\n");
  632. #else
  633. if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
  634. PRINTK("unregister_chrdev failed with %d.\n", nresult);
  635. else
  636. PDEBUG("unregister_chrdev successful.\n");
  637. #endif
  638. return result; // failure
  639. }
  640. /**
  641. * The module termination code
  642. */
  643. static void __exit
  644. z90crypt_cleanup_module(void)
  645. {
  646. int nresult;
  647. PDEBUG("PID %d\n", PID());
  648. remove_proc_entry("driver/z90crypt", 0);
  649. #ifndef Z90CRYPT_USE_HOTPLUG
  650. if ((nresult = misc_deregister(&z90crypt_misc_device)))
  651. PRINTK("misc_deregister failed with %d.\n", nresult);
  652. else
  653. PDEBUG("misc_deregister successful.\n");
  654. #else
  655. z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
  656. if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
  657. PRINTK("unregister_chrdev failed with %d.\n", nresult);
  658. else
  659. PDEBUG("unregister_chrdev successful.\n");
  660. #endif
  661. /* Remove the tasks */
  662. tasklet_kill(&reader_tasklet);
  663. del_timer(&reader_timer);
  664. del_timer(&config_timer);
  665. del_timer(&cleanup_timer);
  666. destroy_z90crypt();
  667. PRINTKN("Unloaded.\n");
  668. }
  669. /**
  670. * Functions running under a process id
  671. *
  672. * The I/O functions:
  673. * z90crypt_open
  674. * z90crypt_release
  675. * z90crypt_read
  676. * z90crypt_write
  677. * z90crypt_unlocked_ioctl
  678. * z90crypt_status
  679. * z90crypt_status_write
  680. * disable_card
  681. * enable_card
  682. * scan_char
  683. * scan_string
  684. *
  685. * Helper functions:
  686. * z90crypt_rsa
  687. * z90crypt_prepare
  688. * z90crypt_send
  689. * z90crypt_process_results
  690. *
  691. */
  692. static int
  693. z90crypt_open(struct inode *inode, struct file *filp)
  694. {
  695. struct priv_data *private_data_p;
  696. if (quiesce_z90crypt)
  697. return -EQUIESCE;
  698. private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
  699. if (!private_data_p) {
  700. PRINTK("Memory allocate failed\n");
  701. return -ENOMEM;
  702. }
  703. memset((void *)private_data_p, 0, sizeof(struct priv_data));
  704. private_data_p->status = STAT_OPEN;
  705. private_data_p->opener_pid = PID();
  706. filp->private_data = private_data_p;
  707. atomic_inc(&total_open);
  708. return 0;
  709. }
  710. static int
  711. z90crypt_release(struct inode *inode, struct file *filp)
  712. {
  713. struct priv_data *private_data_p = filp->private_data;
  714. PDEBUG("PID %d (filp %p)\n", PID(), filp);
  715. private_data_p->status = STAT_CLOSED;
  716. memset(private_data_p, 0, sizeof(struct priv_data));
  717. kfree(private_data_p);
  718. atomic_dec(&total_open);
  719. return 0;
  720. }
  721. /*
  722. * there are two read functions; compile options select which one is built:
  723. * without USE_GET_RANDOM_BYTES
  724. * => read() always returns -EPERM;
  725. * otherwise
  726. * => read() uses get_random_bytes() kernel function
  727. */
  728. #ifndef USE_GET_RANDOM_BYTES
  729. /**
  730. * z90crypt_read will not be supported beyond z90crypt 1.3.1
  731. */
  732. static ssize_t
  733. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  734. {
  735. PDEBUG("filp %p (PID %d)\n", filp, PID());
  736. return -EPERM;
  737. }
  738. #else // we want to use get_random_bytes
  739. /**
  740. * read() just returns a string of random bytes. Since we have no way
  741. * to generate these cryptographically, we just execute get_random_bytes
  742. * for the length specified.
  743. */
  744. #include <linux/random.h>
  745. static ssize_t
  746. z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  747. {
  748. unsigned char *temp_buff;
  749. PDEBUG("filp %p (PID %d)\n", filp, PID());
  750. if (quiesce_z90crypt)
  751. return -EQUIESCE;
  752. if (count < 0) {
  753. PRINTK("Requested random byte count negative: %ld\n", count);
  754. return -EINVAL;
  755. }
  756. if (count > RESPBUFFSIZE) {
  757. PDEBUG("count[%d] > RESPBUFFSIZE", count);
  758. return -EINVAL;
  759. }
  760. if (count == 0)
  761. return 0;
  762. temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
  763. if (!temp_buff) {
  764. PRINTK("Memory allocate failed\n");
  765. return -ENOMEM;
  766. }
  767. get_random_bytes(temp_buff, count);
  768. if (copy_to_user(buf, temp_buff, count) != 0) {
  769. kfree(temp_buff);
  770. return -EFAULT;
  771. }
  772. kfree(temp_buff);
  773. return count;
  774. }
  775. #endif
  776. /**
  778. * Write is not allowed
  778. */
  779. static ssize_t
  780. z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
  781. {
  782. PDEBUG("filp %p (PID %d)\n", filp, PID());
  783. return -EPERM;
  784. }
  785. /**
  786. * New status functions
  787. */
  788. static inline int
  789. get_status_totalcount(void)
  790. {
  791. return z90crypt.hdware_info->hdware_mask.st_count;
  792. }
  793. static inline int
  794. get_status_PCICAcount(void)
  795. {
  796. return z90crypt.hdware_info->type_mask[PCICA].st_count;
  797. }
  798. static inline int
  799. get_status_PCICCcount(void)
  800. {
  801. return z90crypt.hdware_info->type_mask[PCICC].st_count;
  802. }
  803. static inline int
  804. get_status_PCIXCCcount(void)
  805. {
  806. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
  807. z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  808. }
  809. static inline int
  810. get_status_PCIXCCMCL2count(void)
  811. {
  812. return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
  813. }
  814. static inline int
  815. get_status_PCIXCCMCL3count(void)
  816. {
  817. return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
  818. }
  819. static inline int
  820. get_status_CEX2Ccount(void)
  821. {
  822. return z90crypt.hdware_info->type_mask[CEX2C].st_count;
  823. }
  824. static inline int
  825. get_status_requestq_count(void)
  826. {
  827. return requestq_count;
  828. }
  829. static inline int
  830. get_status_pendingq_count(void)
  831. {
  832. return pendingq_count;
  833. }
  834. static inline int
  835. get_status_totalopen_count(void)
  836. {
  837. return atomic_read(&total_open);
  838. }
  839. static inline int
  840. get_status_domain_index(void)
  841. {
  842. return z90crypt.cdx;
  843. }
  844. static inline unsigned char *
  845. get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
  846. {
  847. int i, ix;
  848. memcpy(status, z90crypt.hdware_info->device_type_array,
  849. Z90CRYPT_NUM_APS);
  850. for (i = 0; i < get_status_totalcount(); i++) {
  851. ix = SHRT2LONG(i);
  852. if (LONG2DEVPTR(ix)->user_disabled)
  853. status[ix] = 0x0d;
  854. }
  855. return status;
  856. }
  857. static inline unsigned char *
  858. get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
  859. {
  860. int i, ix;
  861. memset(qdepth, 0, Z90CRYPT_NUM_APS);
  862. for (i = 0; i < get_status_totalcount(); i++) {
  863. ix = SHRT2LONG(i);
  864. qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
  865. }
  866. return qdepth;
  867. }
  868. static inline unsigned int *
  869. get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
  870. {
  871. int i, ix;
  872. memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
  873. for (i = 0; i < get_status_totalcount(); i++) {
  874. ix = SHRT2LONG(i);
  875. reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
  876. }
  877. return reqcnt;
  878. }
  879. static inline void
  880. init_work_element(struct work_element *we_p,
  881. struct priv_data *priv_data, pid_t pid)
  882. {
  883. int step;
  884. we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
  885. /* Come up with a unique id for this caller. */
  886. step = atomic_inc_return(&z90crypt_step);
  887. memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
  888. memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
  889. we_p->pid = pid;
  890. we_p->priv_data = priv_data;
  891. we_p->status[0] = STAT_DEFAULT;
  892. we_p->audit[0] = 0x00;
  893. we_p->audit[1] = 0x00;
  894. we_p->audit[2] = 0x00;
  895. we_p->resp_buff_size = 0;
  896. we_p->retcode = 0;
  897. we_p->devindex = -1;
  898. we_p->devtype = -1;
  899. atomic_set(&we_p->alarmrung, 0);
  900. init_waitqueue_head(&we_p->waitq);
  901. INIT_LIST_HEAD(&(we_p->liste));
  902. }
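/**
 * Resulting caller_id layout (from the two memcpy calls above, assuming
 * 4-byte pid_t and int):
 *	bytes 0-3: requesting PID
 *	bytes 4-7: value of z90crypt_step for this request
 * which gives every outstanding work element a unique 8-byte id.
 */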
  903. static inline int
  904. allocate_work_element(struct work_element **we_pp,
  905. struct priv_data *priv_data_p, pid_t pid)
  906. {
  907. struct work_element *we_p;
  908. we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
  909. if (!we_p)
  910. return -ENOMEM;
  911. init_work_element(we_p, priv_data_p, pid);
  912. *we_pp = we_p;
  913. return 0;
  914. }
  915. static inline void
  916. remove_device(struct device *device_p)
  917. {
  918. if (!device_p || (device_p->disabled != 0))
  919. return;
  920. device_p->disabled = 1;
  921. z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
  922. z90crypt.hdware_info->hdware_mask.disabled_count++;
  923. }
  924. /**
  925. * Bitlength limits for each card
  926. *
  927. * There are new MCLs which allow more bitlengths. See the table for details.
  928. * The MCL must be applied and the newer bitlengths enabled for these to work.
  929. *
  930. * Card Type Old limit New limit
  931. * PCICC 512-1024 512-2048
  932. * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+)
  933. * PCIXCC_MCL3 512-2048 128-2048
  934. * CEX2C 512-2048 128-2048
  935. *
  936. * ext_bitlens (extended bitlengths) is a global, since you should not apply an
  937. * MCL to just one card in a machine. We assume, at first, that all cards have
  938. * these capabilities.
  939. */
  940. int ext_bitlens = 1; // This is global
  941. #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
  942. #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
  943. #define PCICC_MIN_MOD_SIZE 64 // 512 bits
  944. #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
  945. #define MAX_MOD_SIZE 256 // 2048 bits
  946. static inline int
  947. select_device_type(int *dev_type_p, int bytelength)
  948. {
  949. static int count = 0;
  950. int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
  951. struct status *stat;
  952. if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
  953. (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
  954. (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
  955. return -1;
  956. if (*dev_type_p != ANYDEV) {
  957. stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
  958. if (stat->st_count >
  959. (stat->disabled_count + stat->user_disabled_count))
  960. return 0;
  961. return -1;
  962. }
  963. /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
  964. stat = &z90crypt.hdware_info->type_mask[PCICA];
  965. PCICA_avail = stat->st_count -
  966. (stat->disabled_count + stat->user_disabled_count);
  967. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
  968. PCIXCC_MCL3_avail = stat->st_count -
  969. (stat->disabled_count + stat->user_disabled_count);
  970. stat = &z90crypt.hdware_info->type_mask[CEX2C];
  971. CEX2C_avail = stat->st_count -
  972. (stat->disabled_count + stat->user_disabled_count);
  973. if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
  974. /**
  975. * bitlength is a factor, PCICA is the most capable, even with
  976. * the new MCL.
  977. */
  978. if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
  979. (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
  980. if (!PCICA_avail)
  981. return -1;
  982. else {
  983. *dev_type_p = PCICA;
  984. return 0;
  985. }
  986. }
  987. index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
  988. CEX2C_avail);
  989. if (index_to_use < PCICA_avail)
  990. *dev_type_p = PCICA;
  991. else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
  992. *dev_type_p = PCIXCC_MCL3;
  993. else
  994. *dev_type_p = CEX2C;
  995. count++;
  996. return 0;
  997. }
  998. /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
  999. if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
  1000. return -1;
  1001. stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
  1002. if (stat->st_count >
  1003. (stat->disabled_count + stat->user_disabled_count)) {
  1004. *dev_type_p = PCIXCC_MCL2;
  1005. return 0;
  1006. }
  1007. /**
  1008. * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
  1009. * (if we don't have the MCL applied and the newer bitlengths enabled)
  1010. * cannot go to a PCICC
  1011. */
  1012. if ((bytelength < PCICC_MIN_MOD_SIZE) ||
  1013. (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
  1014. return -1;
  1015. }
  1016. stat = &z90crypt.hdware_info->type_mask[PCICC];
  1017. if (stat->st_count >
  1018. (stat->disabled_count + stat->user_disabled_count)) {
  1019. *dev_type_p = PCICC;
  1020. return 0;
  1021. }
  1022. return -1;
  1023. }
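/**
 * Summary of the selection policy implemented above (descriptive only):
 * an explicit device type is honored if any such device is usable;
 * ANYDEV round-robins across PCICA, PCIXCC_MCL3 and CEX2C via
 *
 *	index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + CEX2C_avail);
 *
 * then falls back to PCIXCC_MCL2 and finally PCICC, subject to the
 * bitlength limits tabulated earlier.
 */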
  1024. /**
  1025. * Try the selected number, then the selected type (can be ANYDEV)
  1026. */
  1027. static inline int
  1028. select_device(int *dev_type_p, int *device_nr_p, int bytelength)
  1029. {
  1030. int i, indx, devTp, low_count, low_indx;
  1031. struct device_x *index_p;
  1032. struct device *dev_ptr;
  1033. PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
  1034. if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
  1035. PDEBUG("trying index = %d\n", *device_nr_p);
  1036. dev_ptr = z90crypt.device_p[*device_nr_p];
  1037. if (dev_ptr &&
  1038. (dev_ptr->dev_stat != DEV_GONE) &&
  1039. (dev_ptr->disabled == 0) &&
  1040. (dev_ptr->user_disabled == 0)) {
  1041. PDEBUG("selected by number, index = %d\n",
  1042. *device_nr_p);
  1043. *dev_type_p = dev_ptr->dev_type;
  1044. return *device_nr_p;
  1045. }
  1046. }
  1047. *device_nr_p = -1;
  1048. PDEBUG("trying type = %d\n", *dev_type_p);
  1049. devTp = *dev_type_p;
  1050. if (select_device_type(&devTp, bytelength) == -1) {
  1051. PDEBUG("failed to select by type\n");
  1052. return -1;
  1053. }
  1054. PDEBUG("selected type = %d\n", devTp);
  1055. index_p = &z90crypt.hdware_info->type_x_addr[devTp];
  1056. low_count = 0x0000FFFF;
  1057. low_indx = -1;
  1058. for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
  1059. indx = index_p->device_index[i];
  1060. dev_ptr = z90crypt.device_p[indx];
  1061. if (dev_ptr &&
  1062. (dev_ptr->dev_stat != DEV_GONE) &&
  1063. (dev_ptr->disabled == 0) &&
  1064. (dev_ptr->user_disabled == 0) &&
  1065. (devTp == dev_ptr->dev_type) &&
  1066. (low_count > dev_ptr->dev_caller_count)) {
  1067. low_count = dev_ptr->dev_caller_count;
  1068. low_indx = indx;
  1069. }
  1070. }
  1071. *device_nr_p = low_indx;
  1072. return low_indx;
  1073. }
  1074. static inline int
  1075. send_to_crypto_device(struct work_element *we_p)
  1076. {
  1077. struct caller *caller_p;
  1078. struct device *device_p;
  1079. int dev_nr;
  1080. int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
  1081. if (!we_p->requestptr)
  1082. return SEN_FATAL_ERROR;
  1083. caller_p = (struct caller *)we_p->requestptr;
  1084. dev_nr = we_p->devindex;
  1085. if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
  1086. if (z90crypt.hdware_info->hdware_mask.st_count != 0)
  1087. return SEN_RETRY;
  1088. else
  1089. return SEN_NOT_AVAIL;
  1090. }
  1091. we_p->devindex = dev_nr;
  1092. device_p = z90crypt.device_p[dev_nr];
  1093. if (!device_p)
  1094. return SEN_NOT_AVAIL;
  1095. if (device_p->dev_type != we_p->devtype)
  1096. return SEN_RETRY;
  1097. if (device_p->dev_caller_count >= device_p->dev_q_depth)
  1098. return SEN_QUEUE_FULL;
  1099. PDEBUG("device number prior to send: %d\n", dev_nr);
  1100. switch (send_to_AP(dev_nr, z90crypt.cdx,
  1101. caller_p->caller_dev_dep_req_l,
  1102. caller_p->caller_dev_dep_req_p)) {
  1103. case DEV_SEN_EXCEPTION:
  1104. PRINTKC("Exception during send to device %d\n", dev_nr);
  1105. z90crypt.terminating = 1;
  1106. return SEN_FATAL_ERROR;
  1107. case DEV_GONE:
  1108. PRINTK("Device %d not available\n", dev_nr);
  1109. remove_device(device_p);
  1110. return SEN_NOT_AVAIL;
  1111. case DEV_EMPTY:
  1112. return SEN_NOT_AVAIL;
  1113. case DEV_NO_WORK:
  1114. return SEN_FATAL_ERROR;
  1115. case DEV_BAD_MESSAGE:
  1116. return SEN_USER_ERROR;
  1117. case DEV_QUEUE_FULL:
  1118. return SEN_QUEUE_FULL;
  1119. default:
  1120. case DEV_ONLINE:
  1121. break;
  1122. }
  1123. list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
  1124. device_p->dev_caller_count++;
  1125. return 0;
  1126. }
  1127. /**
  1128. * Send puts the user's work on one of two queues:
  1129. * the pending queue if the send was successful
  1130. * the request queue if the send failed because device full or busy
  1131. */
  1132. static inline int
  1133. z90crypt_send(struct work_element *we_p, const char *buf)
  1134. {
  1135. int rv;
  1136. PDEBUG("PID %d\n", PID());
  1137. if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
  1138. PDEBUG("PID %d tried to send more work but has outstanding "
  1139. "work.\n", PID());
  1140. return -EWORKPEND;
  1141. }
  1142. we_p->devindex = -1; // Reset device number
  1143. spin_lock_irq(&queuespinlock);
  1144. rv = send_to_crypto_device(we_p);
  1145. switch (rv) {
  1146. case 0:
  1147. we_p->requestsent = jiffies;
  1148. we_p->audit[0] |= FP_SENT;
  1149. list_add_tail(&we_p->liste, &pending_list);
  1150. ++pendingq_count;
  1151. we_p->audit[0] |= FP_PENDING;
  1152. break;
  1153. case SEN_BUSY:
  1154. case SEN_QUEUE_FULL:
  1155. rv = 0;
  1156. we_p->devindex = -1; // any device will do
  1157. we_p->requestsent = jiffies;
  1158. list_add_tail(&we_p->liste, &request_list);
  1159. ++requestq_count;
  1160. we_p->audit[0] |= FP_REQUEST;
  1161. break;
  1162. case SEN_RETRY:
  1163. rv = -ERESTARTSYS;
  1164. break;
  1165. case SEN_NOT_AVAIL:
  1166. PRINTK("*** No devices available.\n");
  1167. rv = we_p->retcode = -ENODEV;
  1168. we_p->status[0] |= STAT_FAILED;
  1169. break;
  1170. case REC_OPERAND_INV:
  1171. case REC_OPERAND_SIZE:
  1172. case REC_EVEN_MOD:
  1173. case REC_INVALID_PAD:
  1174. rv = we_p->retcode = -EINVAL;
  1175. we_p->status[0] |= STAT_FAILED;
  1176. break;
  1177. default:
  1178. we_p->retcode = rv;
  1179. we_p->status[0] |= STAT_FAILED;
  1180. break;
  1181. }
  1182. if (rv != -ERESTARTSYS)
  1183. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1184. spin_unlock_irq(&queuespinlock);
  1185. if (rv == 0)
  1186. tasklet_schedule(&reader_tasklet);
  1187. return rv;
  1188. }
  1189. /**
  1190. * process_results copies the user's work from kernel space.
  1191. */
  1192. static inline int
  1193. z90crypt_process_results(struct work_element *we_p, char __user *buf)
  1194. {
  1195. int rv;
  1196. PDEBUG("we_p %p (PID %d)\n", we_p, PID());
  1197. LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
  1198. SET_RDWRMASK(we_p->status[0], STAT_READPEND);
  1199. rv = 0;
  1200. if (!we_p->buffer) {
  1201. PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
  1202. we_p, PID());
  1203. rv = -ENOBUFF;
  1204. }
  1205. if (!rv)
  1206. if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
  1207. PDEBUG("copy_to_user failed: rv = %d\n", rv);
  1208. rv = -EFAULT;
  1209. }
  1210. if (!rv)
  1211. rv = we_p->retcode;
  1212. if (!rv)
  1213. if (we_p->resp_buff_size
  1214. && copy_to_user(we_p->resp_addr, we_p->resp_buff,
  1215. we_p->resp_buff_size))
  1216. rv = -EFAULT;
  1217. SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
  1218. return rv;
  1219. }
  1220. static unsigned char NULL_psmid[8] =
  1221. {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  1222. /**
  1223. * Used in device configuration functions
  1224. */
  1225. #define MAX_RESET 90
  1226. /**
  1227. * This is used only for PCICC support
  1228. */
  1229. static inline int
  1230. is_PKCS11_padded(unsigned char *buffer, int length)
  1231. {
  1232. int i;
  1233. if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
  1234. return 0;
  1235. for (i = 2; i < length; i++)
  1236. if (buffer[i] != 0xFF)
  1237. break;
  1238. if ((i < 10) || (i == length))
  1239. return 0;
  1240. if (buffer[i] != 0x00)
  1241. return 0;
  1242. return 1;
  1243. }
  1244. /**
  1245. * This is used only for PCICC support
  1246. */
  1247. static inline int
  1248. is_PKCS12_padded(unsigned char *buffer, int length)
  1249. {
  1250. int i;
  1251. if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
  1252. return 0;
  1253. for (i = 2; i < length; i++)
  1254. if (buffer[i] == 0x00)
  1255. break;
  1256. if ((i < 10) || (i == length))
  1257. return 0;
  1258. if (buffer[i] != 0x00)
  1259. return 0;
  1260. return 1;
  1261. }
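/*
 * Illustrative sketch: the checks in the two helpers above match the
 * PKCS #1 v1.5 block formats, each of which requires at least eight
 * padding bytes before the 0x00 separator:
 *
 *     block type 1:  0x00 0x01 0xFF ... 0xFF 0x00 <data>    (is_PKCS11_padded)
 *     block type 2:  0x00 0x02 <nonzero bytes> 0x00 <data>  (is_PKCS12_padded)
 *
 * A hypothetical buffer that is_PKCS11_padded() accepts and
 * is_PKCS12_padded() rejects:
 *
 *     unsigned char example[16] = {
 *             0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
 *             0xFF, 0xFF, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, 0x42
 *     };
 *
 *     is_PKCS11_padded(example, sizeof(example));   // returns 1
 *     is_PKCS12_padded(example, sizeof(example));   // returns 0
 */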
  1262. /**
  1263. * builds struct caller and converts message from generic format to
  1264. * device-dependent format
  1265. * func is ICARSAMODEXPO or ICARSACRT
  1266. * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
  1267. */
  1268. static inline int
  1269. build_caller(struct work_element *we_p, short function)
  1270. {
  1271. int rv;
  1272. struct caller *caller_p = (struct caller *)we_p->requestptr;
  1273. if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
  1274. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1275. (we_p->devtype != CEX2C))
  1276. return SEN_NOT_AVAIL;
  1277. memcpy(caller_p->caller_id, we_p->caller_id,
  1278. sizeof(caller_p->caller_id));
  1279. caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
  1280. caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
  1281. caller_p->caller_buf_p = we_p->buffer;
  1282. INIT_LIST_HEAD(&(caller_p->caller_liste));
  1283. rv = convert_request(we_p->buffer, we_p->funccode, function,
  1284. z90crypt.cdx, we_p->devtype,
  1285. &caller_p->caller_dev_dep_req_l,
  1286. caller_p->caller_dev_dep_req_p);
  1287. if (rv) {
  1288. if (rv == SEN_NOT_AVAIL)
  1289. PDEBUG("request can't be processed on hdwr avail\n");
  1290. else
  1291. PRINTK("Error from convert_request: %d\n", rv);
  1292. }
  1293. else
  1294. memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
  1295. return rv;
  1296. }
  1297. static inline void
  1298. unbuild_caller(struct device *device_p, struct caller *caller_p)
  1299. {
  1300. if (!caller_p)
  1301. return;
  1302. if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
  1303. if (!list_empty(&caller_p->caller_liste)) {
  1304. list_del_init(&caller_p->caller_liste);
  1305. device_p->dev_caller_count--;
  1306. }
  1307. memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
  1308. }
  1309. static inline int
  1310. get_crypto_request_buffer(struct work_element *we_p)
  1311. {
  1312. struct ica_rsa_modexpo *mex_p;
  1313. struct ica_rsa_modexpo_crt *crt_p;
  1314. unsigned char *temp_buffer;
  1315. short function;
  1316. int rv;
  1317. mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
  1318. crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
  1319. PDEBUG("device type input = %d\n", we_p->devtype);
  1320. if (z90crypt.terminating)
  1321. return REC_NO_RESPONSE;
  1322. if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
  1323. PRINTK("psmid zeroes\n");
  1324. return SEN_FATAL_ERROR;
  1325. }
  1326. if (!we_p->buffer) {
  1327. PRINTK("buffer pointer NULL\n");
  1328. return SEN_USER_ERROR;
  1329. }
  1330. if (!we_p->requestptr) {
  1331. PRINTK("caller pointer NULL\n");
  1332. return SEN_USER_ERROR;
  1333. }
  1334. if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
  1335. (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
  1336. (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
  1337. PRINTK("invalid device type\n");
  1338. return SEN_USER_ERROR;
  1339. }
  1340. if ((mex_p->inputdatalength < 1) ||
  1341. (mex_p->inputdatalength > MAX_MOD_SIZE)) {
  1342. PRINTK("inputdatalength[%d] is not valid\n",
  1343. mex_p->inputdatalength);
  1344. return SEN_USER_ERROR;
  1345. }
  1346. if (mex_p->outputdatalength < mex_p->inputdatalength) {
  1347. PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
  1348. mex_p->outputdatalength, mex_p->inputdatalength);
  1349. return SEN_USER_ERROR;
  1350. }
  1351. if (!mex_p->inputdata || !mex_p->outputdata) {
  1352. PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1353. mex_p->inputdata, mex_p->outputdata);
  1354. return SEN_USER_ERROR;
  1355. }
  1356. /**
  1357. * As long as outputdatalength is big enough, we can set the
  1358. * outputdatalength equal to the inputdatalength, since that is the
  1359. * number of bytes we will copy in any case
  1360. */
  1361. mex_p->outputdatalength = mex_p->inputdatalength;
  1362. rv = 0;
  1363. switch (we_p->funccode) {
  1364. case ICARSAMODEXPO:
  1365. if (!mex_p->b_key || !mex_p->n_modulus)
  1366. rv = SEN_USER_ERROR;
  1367. break;
  1368. case ICARSACRT:
  1369. if (!IS_EVEN(crt_p->inputdatalength)) {
  1370. PRINTK("inputdatalength[%d] is odd, CRT form\n",
  1371. crt_p->inputdatalength);
  1372. rv = SEN_USER_ERROR;
  1373. break;
  1374. }
  1375. if (!crt_p->bp_key ||
  1376. !crt_p->bq_key ||
  1377. !crt_p->np_prime ||
  1378. !crt_p->nq_prime ||
  1379. !crt_p->u_mult_inv) {
  1380. PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
  1381. crt_p->bp_key, crt_p->bq_key,
  1382. crt_p->np_prime, crt_p->nq_prime,
  1383. crt_p->u_mult_inv);
  1384. rv = SEN_USER_ERROR;
  1385. }
  1386. break;
  1387. default:
  1388. PRINTK("bad func = %d\n", we_p->funccode);
  1389. rv = SEN_USER_ERROR;
  1390. break;
  1391. }
  1392. if (rv != 0)
  1393. return rv;
  1394. if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
  1395. return SEN_NOT_AVAIL;
  1396. temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
  1397. sizeof(struct caller);
  1398. if (copy_from_user(temp_buffer, mex_p->inputdata,
  1399. mex_p->inputdatalength) != 0)
  1400. return SEN_RELEASED;
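/*
 * A minimal layout sketch for the temp_buffer arithmetic above, assuming
 * (as we_p->requestptr and the offset calculation imply) that each request
 * is backed by a single page with the struct caller placed directly behind
 * the work element:
 *
 *     we_p -------------> +--------------------------+
 *                         | struct work_element      |
 *     we_p->requestptr -> +--------------------------+
 *                         | struct caller            |
 *     temp_buffer ------> +--------------------------+
 *                         | copied-in user input     |
 *                         +--------------------------+
 */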
  1401. function = PCI_FUNC_KEY_ENCRYPT;
  1402. switch (we_p->devtype) {
  1403. /* PCICA does everything with a simple RSA mod-expo operation */
  1404. case PCICA:
  1405. function = PCI_FUNC_KEY_ENCRYPT;
  1406. break;
  1407. /**
  1408. * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
  1409. * operation, and all CRT forms with a PKCS-1.2 format decrypt.
  1410. * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
  1411. * mod-expo operation
  1412. */
  1413. case PCIXCC_MCL2:
  1414. if (we_p->funccode == ICARSAMODEXPO)
  1415. function = PCI_FUNC_KEY_ENCRYPT;
  1416. else
  1417. function = PCI_FUNC_KEY_DECRYPT;
  1418. break;
  1419. case PCIXCC_MCL3:
  1420. case CEX2C:
  1421. if (we_p->funccode == ICARSAMODEXPO)
  1422. function = PCI_FUNC_KEY_ENCRYPT;
  1423. else
  1424. function = PCI_FUNC_KEY_DECRYPT;
  1425. break;
  1426. /**
  1427. * PCICC does everything as a PKCS-1.2 format request
  1428. */
  1429. case PCICC:
1430. /* PCICC cannot handle input that is PKCS#1.1 padded */
  1431. if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
  1432. return SEN_NOT_AVAIL;
  1433. }
  1434. if (we_p->funccode == ICARSAMODEXPO) {
  1435. if (is_PKCS12_padded(temp_buffer,
  1436. mex_p->inputdatalength))
  1437. function = PCI_FUNC_KEY_ENCRYPT;
  1438. else
  1439. function = PCI_FUNC_KEY_DECRYPT;
  1440. } else
  1441. /* all CRT forms are decrypts */
  1442. function = PCI_FUNC_KEY_DECRYPT;
  1443. break;
  1444. }
  1445. PDEBUG("function: %04x\n", function);
  1446. rv = build_caller(we_p, function);
  1447. PDEBUG("rv from build_caller = %d\n", rv);
  1448. return rv;
  1449. }
  1450. static inline int
  1451. z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
  1452. const char __user *buffer)
  1453. {
  1454. int rv;
  1455. we_p->devindex = -1;
  1456. if (funccode == ICARSAMODEXPO)
  1457. we_p->buff_size = sizeof(struct ica_rsa_modexpo);
  1458. else
  1459. we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
  1460. if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
  1461. return -EFAULT;
  1462. we_p->audit[0] |= FP_COPYFROM;
  1463. SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
  1464. we_p->funccode = funccode;
  1465. we_p->devtype = -1;
  1466. we_p->audit[0] |= FP_BUFFREQ;
  1467. rv = get_crypto_request_buffer(we_p);
  1468. switch (rv) {
  1469. case 0:
  1470. we_p->audit[0] |= FP_BUFFGOT;
  1471. break;
  1472. case SEN_USER_ERROR:
  1473. rv = -EINVAL;
  1474. break;
  1475. case SEN_QUEUE_FULL:
  1476. rv = 0;
  1477. break;
  1478. case SEN_RELEASED:
  1479. rv = -EFAULT;
  1480. break;
  1481. case REC_NO_RESPONSE:
  1482. rv = -ENODEV;
  1483. break;
  1484. case SEN_NOT_AVAIL:
  1485. case EGETBUFF:
  1486. rv = -EGETBUFF;
  1487. break;
  1488. default:
  1489. PRINTK("rv = %d\n", rv);
  1490. rv = -EGETBUFF;
  1491. break;
  1492. }
  1493. if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
  1494. SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
  1495. return rv;
  1496. }
  1497. static inline void
  1498. purge_work_element(struct work_element *we_p)
  1499. {
  1500. struct list_head *lptr;
  1501. spin_lock_irq(&queuespinlock);
  1502. list_for_each(lptr, &request_list) {
  1503. if (lptr == &we_p->liste) {
  1504. list_del_init(lptr);
  1505. requestq_count--;
  1506. break;
  1507. }
  1508. }
  1509. list_for_each(lptr, &pending_list) {
  1510. if (lptr == &we_p->liste) {
  1511. list_del_init(lptr);
  1512. pendingq_count--;
  1513. break;
  1514. }
  1515. }
  1516. spin_unlock_irq(&queuespinlock);
  1517. }
  1518. /**
  1519. * Build the request and send it.
  1520. */
  1521. static inline int
  1522. z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
  1523. unsigned int cmd, unsigned long arg)
  1524. {
  1525. struct work_element *we_p;
  1526. int rv;
  1527. if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
  1528. PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
  1529. return rv;
  1530. }
  1531. if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
  1532. PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
  1533. if (!rv)
  1534. if ((rv = z90crypt_send(we_p, (const char *)arg)))
  1535. PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
  1536. if (!rv) {
  1537. we_p->audit[0] |= FP_ASLEEP;
  1538. wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
  1539. we_p->audit[0] |= FP_AWAKE;
  1540. rv = we_p->retcode;
  1541. }
  1542. if (!rv)
  1543. rv = z90crypt_process_results(we_p, (char __user *)arg);
  1544. if ((we_p->status[0] & STAT_FAILED)) {
  1545. switch (rv) {
  1546. /**
  1547. * EINVAL *after* receive is almost always a padding error or
  1548. * length error issued by a coprocessor (not an accelerator).
  1549. * We convert this return value to -EGETBUFF which should
  1550. * trigger a fallback to software.
  1551. */
  1552. case -EINVAL:
  1553. if (we_p->devtype != PCICA)
  1554. rv = -EGETBUFF;
  1555. break;
  1556. case -ETIMEOUT:
  1557. if (z90crypt.mask.st_count > 0)
  1558. rv = -ERESTARTSYS; // retry with another
  1559. else
  1560. rv = -ENODEV; // no cards left
  1561. /* fall through to clean up request queue */
  1562. case -ERESTARTSYS:
  1563. case -ERELEASED:
  1564. switch (CHK_RDWRMASK(we_p->status[0])) {
  1565. case STAT_WRITTEN:
  1566. purge_work_element(we_p);
  1567. break;
  1568. case STAT_READPEND:
  1569. case STAT_NOWORK:
  1570. default:
  1571. break;
  1572. }
  1573. break;
  1574. default:
  1575. we_p->status[0] ^= STAT_FAILED;
  1576. break;
  1577. }
  1578. }
  1579. free_page((long)we_p);
  1580. return rv;
  1581. }
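/*
 * A hedged user-space sketch of how this path is driven; the device node
 * name and the "z90crypt.h" include are assumptions, while ICARSAMODEXPO
 * and the struct ica_rsa_modexpo fields are the ones handled here:
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include "z90crypt.h"                // assumed: exports ICARSAMODEXPO
 *
 *     int modexpo(char *msg, char *exp, char *mod, char *out, int len)
 *     {
 *             struct ica_rsa_modexpo mex;
 *             int fd, rc;
 *
 *             memset(&mex, 0, sizeof(mex));
 *             mex.inputdata        = msg;
 *             mex.inputdatalength  = len;
 *             mex.outputdata       = out;
 *             mex.outputdatalength = len;  // must be >= inputdatalength
 *             mex.b_key            = exp;  // exponent buffer (assumed same length as modulus)
 *             mex.n_modulus        = mod;  // modulus buffer
 *
 *             fd = open("/dev/z90crypt", O_RDWR);  // assumed node name
 *             if (fd < 0)
 *                     return -1;
 *             rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *             close(fd);
 *             return rc;                   // 0 on success; -1/errno on failure
 *     }
 */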
  1582. /**
  1583. * This function is a little long, but it's really just one large switch
  1584. * statement.
  1585. */
  1586. static long
  1587. z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1588. {
  1589. struct priv_data *private_data_p = filp->private_data;
  1590. unsigned char *status;
  1591. unsigned char *qdepth;
  1592. unsigned int *reqcnt;
  1593. struct ica_z90_status *pstat;
  1594. int ret, i, loopLim, tempstat;
  1595. static int deprecated_msg_count1 = 0;
  1596. static int deprecated_msg_count2 = 0;
  1597. PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
  1598. PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
  1599. cmd,
  1600. !_IOC_DIR(cmd) ? "NO"
  1601. : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
  1602. : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
  1603. : "WR")),
  1604. _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
  1605. if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
  1606. PRINTK("cmd 0x%08X contains bad magic\n", cmd);
  1607. return -ENOTTY;
  1608. }
  1609. ret = 0;
  1610. switch (cmd) {
  1611. case ICARSAMODEXPO:
  1612. case ICARSACRT:
  1613. if (quiesce_z90crypt) {
  1614. ret = -EQUIESCE;
  1615. break;
  1616. }
  1617. ret = -ENODEV; // Default if no devices
  1618. loopLim = z90crypt.hdware_info->hdware_mask.st_count -
  1619. (z90crypt.hdware_info->hdware_mask.disabled_count +
  1620. z90crypt.hdware_info->hdware_mask.user_disabled_count);
  1621. for (i = 0; i < loopLim; i++) {
  1622. ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
  1623. if (ret != -ERESTARTSYS)
  1624. break;
  1625. }
  1626. if (ret == -ERESTARTSYS)
  1627. ret = -ENODEV;
  1628. break;
  1629. case Z90STAT_TOTALCOUNT:
  1630. tempstat = get_status_totalcount();
1631. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1632. ret = -EFAULT;
  1633. break;
  1634. case Z90STAT_PCICACOUNT:
  1635. tempstat = get_status_PCICAcount();
  1636. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1637. ret = -EFAULT;
  1638. break;
  1639. case Z90STAT_PCICCCOUNT:
  1640. tempstat = get_status_PCICCcount();
  1641. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1642. ret = -EFAULT;
  1643. break;
  1644. case Z90STAT_PCIXCCMCL2COUNT:
  1645. tempstat = get_status_PCIXCCMCL2count();
  1646. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1647. ret = -EFAULT;
  1648. break;
  1649. case Z90STAT_PCIXCCMCL3COUNT:
  1650. tempstat = get_status_PCIXCCMCL3count();
  1651. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1652. ret = -EFAULT;
  1653. break;
  1654. case Z90STAT_CEX2CCOUNT:
  1655. tempstat = get_status_CEX2Ccount();
  1656. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1657. ret = -EFAULT;
  1658. break;
  1659. case Z90STAT_REQUESTQ_COUNT:
  1660. tempstat = get_status_requestq_count();
  1661. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1662. ret = -EFAULT;
  1663. break;
  1664. case Z90STAT_PENDINGQ_COUNT:
  1665. tempstat = get_status_pendingq_count();
  1666. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1667. ret = -EFAULT;
  1668. break;
  1669. case Z90STAT_TOTALOPEN_COUNT:
  1670. tempstat = get_status_totalopen_count();
  1671. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1672. ret = -EFAULT;
  1673. break;
  1674. case Z90STAT_DOMAIN_INDEX:
  1675. tempstat = get_status_domain_index();
  1676. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1677. ret = -EFAULT;
  1678. break;
  1679. case Z90STAT_STATUS_MASK:
  1680. status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1681. if (!status) {
  1682. PRINTK("kmalloc for status failed!\n");
  1683. ret = -ENOMEM;
  1684. break;
  1685. }
  1686. get_status_status_mask(status);
  1687. if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
  1688. != 0)
  1689. ret = -EFAULT;
  1690. kfree(status);
  1691. break;
  1692. case Z90STAT_QDEPTH_MASK:
  1693. qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
  1694. if (!qdepth) {
  1695. PRINTK("kmalloc for qdepth failed!\n");
  1696. ret = -ENOMEM;
  1697. break;
  1698. }
  1699. get_status_qdepth_mask(qdepth);
  1700. if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
  1701. ret = -EFAULT;
  1702. kfree(qdepth);
  1703. break;
  1704. case Z90STAT_PERDEV_REQCNT:
  1705. reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
  1706. if (!reqcnt) {
  1707. PRINTK("kmalloc for reqcnt failed!\n");
  1708. ret = -ENOMEM;
  1709. break;
  1710. }
  1711. get_status_perdevice_reqcnt(reqcnt);
  1712. if (copy_to_user((char __user *) arg, reqcnt,
  1713. Z90CRYPT_NUM_APS * sizeof(int)) != 0)
  1714. ret = -EFAULT;
  1715. kfree(reqcnt);
  1716. break;
  1717. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1718. case ICAZ90STATUS:
  1719. if (deprecated_msg_count1 < 20) {
  1720. PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
  1721. deprecated_msg_count1++;
  1722. if (deprecated_msg_count1 == 20)
  1723. PRINTK("No longer issuing messages related to "
  1724. "deprecated call to ICAZ90STATUS.\n");
  1725. }
  1726. pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
  1727. if (!pstat) {
  1728. PRINTK("kmalloc for pstat failed!\n");
  1729. ret = -ENOMEM;
  1730. break;
  1731. }
  1732. pstat->totalcount = get_status_totalcount();
  1733. pstat->leedslitecount = get_status_PCICAcount();
  1734. pstat->leeds2count = get_status_PCICCcount();
  1735. pstat->requestqWaitCount = get_status_requestq_count();
  1736. pstat->pendingqWaitCount = get_status_pendingq_count();
  1737. pstat->totalOpenCount = get_status_totalopen_count();
  1738. pstat->cryptoDomain = get_status_domain_index();
  1739. get_status_status_mask(pstat->status);
  1740. get_status_qdepth_mask(pstat->qdepth);
  1741. if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
  1742. sizeof(struct ica_z90_status)) != 0)
  1743. ret = -EFAULT;
  1744. kfree(pstat);
  1745. break;
  1746. /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
  1747. case Z90STAT_PCIXCCCOUNT:
  1748. if (deprecated_msg_count2 < 20) {
  1749. PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
  1750. deprecated_msg_count2++;
  1751. if (deprecated_msg_count2 == 20)
  1752. PRINTK("No longer issuing messages about depre"
  1753. "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
  1754. }
  1755. tempstat = get_status_PCIXCCcount();
1756. if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
  1757. ret = -EFAULT;
  1758. break;
  1759. case Z90QUIESCE:
  1760. if (current->euid != 0) {
  1761. PRINTK("QUIESCE fails: euid %d\n",
  1762. current->euid);
  1763. ret = -EACCES;
  1764. } else {
  1765. PRINTK("QUIESCE device from PID %d\n", PID());
  1766. quiesce_z90crypt = 1;
  1767. }
  1768. break;
  1769. default:
  1770. /* user passed an invalid IOCTL number */
  1771. PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
  1772. ret = -ENOTTY;
  1773. break;
  1774. }
  1775. return ret;
  1776. }
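/*
 * A short user-space sketch of the status side of this ioctl interface,
 * assuming fd is an open descriptor on the crypto device node (as in the
 * earlier sketch) and that the user-space header also exports
 * Z90CRYPT_NUM_APS:
 *
 *     int i, total;
 *     unsigned char mask[Z90CRYPT_NUM_APS];
 *
 *     if (ioctl(fd, Z90STAT_TOTALCOUNT, &total) == 0)
 *             printf("devices online: %d\n", total);
 *
 *     if (ioctl(fd, Z90STAT_STATUS_MASK, mask) == 0)
 *             for (i = 0; i < Z90CRYPT_NUM_APS; i++)
 *                     printf("AP %d: status %d\n", i, mask[i]);
 */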
  1777. static inline int
  1778. sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1779. {
  1780. int hl, i;
  1781. hl = 0;
  1782. for (i = 0; i < len; i++)
  1783. hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
  1784. hl += sprintf(outaddr+hl, " ");
  1785. return hl;
  1786. }
  1787. static inline int
  1788. sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
  1789. {
  1790. int hl, inl, c, cx;
  1791. hl = sprintf(outaddr, " ");
  1792. inl = 0;
  1793. for (c = 0; c < (len / 16); c++) {
  1794. hl += sprintcl(outaddr+hl, addr+inl, 16);
  1795. inl += 16;
  1796. }
  1797. cx = len%16;
  1798. if (cx) {
  1799. hl += sprintcl(outaddr+hl, addr+inl, cx);
  1800. inl += cx;
  1801. }
  1802. hl += sprintf(outaddr+hl, "\n");
  1803. return hl;
  1804. }
  1805. static inline int
  1806. sprinthx(unsigned char *title, unsigned char *outaddr,
  1807. unsigned char *addr, unsigned int len)
  1808. {
  1809. int hl, inl, r, rx;
  1810. hl = sprintf(outaddr, "\n%s\n", title);
  1811. inl = 0;
  1812. for (r = 0; r < (len / 64); r++) {
  1813. hl += sprintrw(outaddr+hl, addr+inl, 64);
  1814. inl += 64;
  1815. }
  1816. rx = len % 64;
  1817. if (rx) {
  1818. hl += sprintrw(outaddr+hl, addr+inl, rx);
  1819. inl += rx;
  1820. }
  1821. hl += sprintf(outaddr+hl, "\n");
  1822. return hl;
  1823. }
  1824. static inline int
  1825. sprinthx4(unsigned char *title, unsigned char *outaddr,
  1826. unsigned int *array, unsigned int len)
  1827. {
  1828. int hl, r;
  1829. hl = sprintf(outaddr, "\n%s\n", title);
  1830. for (r = 0; r < len; r++) {
  1831. if ((r % 8) == 0)
  1832. hl += sprintf(outaddr+hl, " ");
  1833. hl += sprintf(outaddr+hl, "%08X ", array[r]);
  1834. if ((r % 8) == 7)
  1835. hl += sprintf(outaddr+hl, "\n");
  1836. }
  1837. hl += sprintf(outaddr+hl, "\n");
  1838. return hl;
  1839. }
  1840. static int
  1841. z90crypt_status(char *resp_buff, char **start, off_t offset,
  1842. int count, int *eof, void *data)
  1843. {
  1844. unsigned char *workarea;
  1845. int len;
  1846. /* resp_buff is a page. Use the right half for a work area */
  1847. workarea = resp_buff+2000;
  1848. len = 0;
  1849. len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
  1850. z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
  1851. len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
  1852. get_status_domain_index());
  1853. len += sprintf(resp_buff+len, "Total device count: %d\n",
  1854. get_status_totalcount());
  1855. len += sprintf(resp_buff+len, "PCICA count: %d\n",
  1856. get_status_PCICAcount());
  1857. len += sprintf(resp_buff+len, "PCICC count: %d\n",
  1858. get_status_PCICCcount());
  1859. len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
  1860. get_status_PCIXCCMCL2count());
  1861. len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
  1862. get_status_PCIXCCMCL3count());
  1863. len += sprintf(resp_buff+len, "CEX2C count: %d\n",
  1864. get_status_CEX2Ccount());
  1865. len += sprintf(resp_buff+len, "requestq count: %d\n",
  1866. get_status_requestq_count());
  1867. len += sprintf(resp_buff+len, "pendingq count: %d\n",
  1868. get_status_pendingq_count());
  1869. len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
  1870. get_status_totalopen_count());
  1871. len += sprinthx(
  1872. "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
  1873. "4: PCIXCC (MCL3), 5: CEX2C",
  1874. resp_buff+len,
  1875. get_status_status_mask(workarea),
  1876. Z90CRYPT_NUM_APS);
  1877. len += sprinthx("Waiting work element counts",
  1878. resp_buff+len,
  1879. get_status_qdepth_mask(workarea),
  1880. Z90CRYPT_NUM_APS);
  1881. len += sprinthx4(
  1882. "Per-device successfully completed request counts",
  1883. resp_buff+len,
  1884. get_status_perdevice_reqcnt((unsigned int *)workarea),
  1885. Z90CRYPT_NUM_APS);
  1886. *eof = 1;
  1887. memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
  1888. return len;
  1889. }
  1890. static inline void
  1891. disable_card(int card_index)
  1892. {
  1893. struct device *devp;
  1894. devp = LONG2DEVPTR(card_index);
  1895. if (!devp || devp->user_disabled)
  1896. return;
  1897. devp->user_disabled = 1;
  1898. z90crypt.hdware_info->hdware_mask.user_disabled_count++;
  1899. if (devp->dev_type == -1)
  1900. return;
  1901. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
  1902. }
  1903. static inline void
  1904. enable_card(int card_index)
  1905. {
  1906. struct device *devp;
  1907. devp = LONG2DEVPTR(card_index);
  1908. if (!devp || !devp->user_disabled)
  1909. return;
  1910. devp->user_disabled = 0;
  1911. z90crypt.hdware_info->hdware_mask.user_disabled_count--;
  1912. if (devp->dev_type == -1)
  1913. return;
  1914. z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
  1915. }
  1916. static inline int
  1917. scan_char(unsigned char *bf, unsigned int len,
  1918. unsigned int *offs, unsigned int *p_eof, unsigned char c)
  1919. {
  1920. unsigned int i, found;
  1921. found = 0;
  1922. for (i = 0; i < len; i++) {
  1923. if (bf[i] == c) {
  1924. found = 1;
  1925. break;
  1926. }
  1927. if (bf[i] == '\0') {
  1928. *p_eof = 1;
  1929. break;
  1930. }
  1931. if (bf[i] == '\n') {
  1932. break;
  1933. }
  1934. }
  1935. *offs = i+1;
  1936. return found;
  1937. }
  1938. static inline int
  1939. scan_string(unsigned char *bf, unsigned int len,
  1940. unsigned int *offs, unsigned int *p_eof, unsigned char *s)
  1941. {
  1942. unsigned int temp_len, temp_offs, found, eof;
  1943. temp_len = temp_offs = found = eof = 0;
  1944. while (!eof && !found) {
  1945. found = scan_char(bf+temp_len, len-temp_len,
  1946. &temp_offs, &eof, *s);
  1947. temp_len += temp_offs;
  1948. if (eof) {
  1949. found = 0;
  1950. break;
  1951. }
  1952. if (found) {
  1953. if (len >= temp_offs+strlen(s)) {
  1954. found = !strncmp(bf+temp_len-1, s, strlen(s));
  1955. if (found) {
  1956. *offs = temp_len+strlen(s)-1;
  1957. break;
  1958. }
  1959. } else {
  1960. found = 0;
  1961. *p_eof = 1;
  1962. break;
  1963. }
  1964. }
  1965. }
  1966. return found;
  1967. }
  1968. static int
  1969. z90crypt_status_write(struct file *file, const char __user *buffer,
  1970. unsigned long count, void *data)
  1971. {
  1972. int i, j, len, offs, found, eof;
  1973. unsigned char *lbuf;
  1974. unsigned int local_count;
  1975. #define LBUFSIZE 600
1976. if (count <= 0)
1977. return 0;
1978. lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1979. if (!lbuf) {
1980. PRINTK("kmalloc failed!\n");
1981. return 0;
1982. }
  1983. local_count = UMIN((unsigned int)count, LBUFSIZE-1);
  1984. if (copy_from_user(lbuf, buffer, local_count) != 0) {
  1985. kfree(lbuf);
  1986. return -EFAULT;
  1987. }
  1988. lbuf[local_count-1] = '\0';
  1989. len = 0;
  1990. eof = 0;
  1991. found = 0;
  1992. while (!eof) {
  1993. found = scan_string(lbuf+len, local_count-len, &offs, &eof,
  1994. "Online devices");
  1995. len += offs;
  1996. if (found == 1)
  1997. break;
  1998. }
  1999. if (eof) {
  2000. kfree(lbuf);
  2001. return count;
  2002. }
  2003. if (found)
  2004. found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
  2005. if (!found || eof) {
  2006. kfree(lbuf);
  2007. return count;
  2008. }
  2009. len += offs;
  2010. j = 0;
  2011. for (i = 0; i < 80; i++) {
  2012. switch (*(lbuf+len+i)) {
  2013. case '\t':
  2014. case ' ':
  2015. break;
  2016. case '\n':
  2017. default:
  2018. eof = 1;
  2019. break;
  2020. case '0':
  2021. case '1':
  2022. case '2':
  2023. case '3':
  2024. case '4':
  2025. case '5':
  2026. j++;
  2027. break;
  2028. case 'd':
  2029. case 'D':
  2030. disable_card(j);
  2031. j++;
  2032. break;
  2033. case 'e':
  2034. case 'E':
  2035. enable_card(j);
  2036. j++;
  2037. break;
  2038. }
  2039. if (eof)
  2040. break;
  2041. }
  2042. kfree(lbuf);
  2043. return count;
  2044. }
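/*
 * Usage note for the write handler above: the scan looks for the
 * "Online devices" header produced by z90crypt_status(), skips the rest
 * of that line, and then walks up to 80 characters of the following row.
 * Blanks and tabs are ignored; the digits '0'-'5' printed by the read
 * side leave card j untouched and advance to the next card; 'd'/'D'
 * disables card j and 'e'/'E' enables it; anything else ends the scan.
 * So the usual sequence is to read the proc entry, edit the row under
 * "Online devices" (for example, change the third card's digit to 'd'
 * to disable that card, leaving the other digits as they were), and
 * write the edited text back to the same proc entry.
 */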
  2045. /**
  2046. * Functions that run under a timer, with no process id
  2047. *
  2048. * The task functions:
  2049. * z90crypt_reader_task
  2050. * helper_send_work
  2051. * helper_handle_work_element
  2052. * helper_receive_rc
  2053. * z90crypt_config_task
  2054. * z90crypt_cleanup_task
  2055. *
  2056. * Helper functions:
  2057. * z90crypt_schedule_reader_timer
  2058. * z90crypt_schedule_reader_task
  2059. * z90crypt_schedule_config_task
  2060. * z90crypt_schedule_cleanup_task
  2061. */
  2062. static inline int
  2063. receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
  2064. unsigned char *buff, unsigned char __user **dest_p_p)
  2065. {
  2066. int dv, rv;
  2067. struct device *dev_ptr;
  2068. struct caller *caller_p;
  2069. struct ica_rsa_modexpo *icaMsg_p;
  2070. struct list_head *ptr, *tptr;
  2071. memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
  2072. if (z90crypt.terminating)
  2073. return REC_FATAL_ERROR;
  2074. caller_p = 0;
  2075. dev_ptr = z90crypt.device_p[index];
  2076. rv = 0;
  2077. do {
  2078. if (!dev_ptr || dev_ptr->disabled) {
  2079. rv = REC_NO_WORK; // a disabled device can't return work
  2080. break;
  2081. }
  2082. if (dev_ptr->dev_self_x != index) {
  2083. PRINTKC("Corrupt dev ptr\n");
  2084. z90crypt.terminating = 1;
  2085. rv = REC_FATAL_ERROR;
  2086. break;
  2087. }
  2088. if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
  2089. dv = DEV_REC_EXCEPTION;
  2090. PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
  2091. dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
  2092. } else {
  2093. PDEBUG("Dequeue called for device %d\n", index);
  2094. dv = receive_from_AP(index, z90crypt.cdx,
  2095. dev_ptr->dev_resp_l,
  2096. dev_ptr->dev_resp_p, psmid);
  2097. }
  2098. switch (dv) {
  2099. case DEV_REC_EXCEPTION:
  2100. rv = REC_FATAL_ERROR;
  2101. z90crypt.terminating = 1;
  2102. PRINTKC("Exception in receive from device %d\n",
  2103. index);
  2104. break;
  2105. case DEV_ONLINE:
  2106. rv = 0;
  2107. break;
  2108. case DEV_EMPTY:
  2109. rv = REC_EMPTY;
  2110. break;
  2111. case DEV_NO_WORK:
  2112. rv = REC_NO_WORK;
  2113. break;
  2114. case DEV_BAD_MESSAGE:
  2115. case DEV_GONE:
  2116. case REC_HARDWAR_ERR:
  2117. default:
  2118. rv = REC_NO_RESPONSE;
  2119. break;
  2120. }
  2121. if (rv)
  2122. break;
  2123. if (dev_ptr->dev_caller_count <= 0) {
  2124. rv = REC_USER_GONE;
  2125. break;
  2126. }
  2127. list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
  2128. caller_p = list_entry(ptr, struct caller, caller_liste);
  2129. if (!memcmp(caller_p->caller_id, psmid,
  2130. sizeof(caller_p->caller_id))) {
  2131. if (!list_empty(&caller_p->caller_liste)) {
  2132. list_del_init(ptr);
  2133. dev_ptr->dev_caller_count--;
  2134. break;
  2135. }
  2136. }
  2137. caller_p = 0;
  2138. }
  2139. if (!caller_p) {
  2140. PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
  2141. "%02X%02X%02X in device list\n",
  2142. psmid[0], psmid[1], psmid[2], psmid[3],
  2143. psmid[4], psmid[5], psmid[6], psmid[7]);
  2144. rv = REC_USER_GONE;
  2145. break;
  2146. }
  2147. PDEBUG("caller_p after successful receive: %p\n", caller_p);
  2148. rv = convert_response(dev_ptr->dev_resp_p,
  2149. caller_p->caller_buf_p, buff_len_p, buff);
  2150. switch (rv) {
  2151. case REC_USE_PCICA:
  2152. break;
  2153. case REC_OPERAND_INV:
  2154. case REC_OPERAND_SIZE:
  2155. case REC_EVEN_MOD:
  2156. case REC_INVALID_PAD:
  2157. PDEBUG("device %d: 'user error' %d\n", index, rv);
  2158. break;
  2159. case WRONG_DEVICE_TYPE:
  2160. case REC_HARDWAR_ERR:
  2161. case REC_BAD_MESSAGE:
  2162. PRINTKW("device %d: hardware error %d\n", index, rv);
  2163. rv = REC_NO_RESPONSE;
  2164. break;
  2165. default:
  2166. PDEBUG("device %d: rv = %d\n", index, rv);
  2167. break;
  2168. }
  2169. } while (0);
  2170. switch (rv) {
  2171. case 0:
  2172. PDEBUG("Successful receive from device %d\n", index);
  2173. icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
  2174. *dest_p_p = icaMsg_p->outputdata;
  2175. if (*buff_len_p == 0)
  2176. PRINTK("Zero *buff_len_p\n");
  2177. break;
  2178. case REC_NO_RESPONSE:
  2179. PRINTKW("Removing device %d from availability\n", index);
  2180. remove_device(dev_ptr);
  2181. break;
  2182. }
  2183. if (caller_p)
  2184. unbuild_caller(dev_ptr, caller_p);
  2185. return rv;
  2186. }
  2187. static inline void
  2188. helper_send_work(int index)
  2189. {
  2190. struct work_element *rq_p;
  2191. int rv;
  2192. if (list_empty(&request_list))
  2193. return;
  2194. requestq_count--;
  2195. rq_p = list_entry(request_list.next, struct work_element, liste);
  2196. list_del_init(&rq_p->liste);
  2197. rq_p->audit[1] |= FP_REMREQUEST;
  2198. if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
  2199. rq_p->devindex = SHRT2LONG(index);
  2200. rv = send_to_crypto_device(rq_p);
  2201. if (rv == 0) {
  2202. rq_p->requestsent = jiffies;
  2203. rq_p->audit[0] |= FP_SENT;
  2204. list_add_tail(&rq_p->liste, &pending_list);
  2205. ++pendingq_count;
  2206. rq_p->audit[0] |= FP_PENDING;
  2207. } else {
  2208. switch (rv) {
  2209. case REC_OPERAND_INV:
  2210. case REC_OPERAND_SIZE:
  2211. case REC_EVEN_MOD:
  2212. case REC_INVALID_PAD:
  2213. rq_p->retcode = -EINVAL;
  2214. break;
  2215. case SEN_NOT_AVAIL:
  2216. case SEN_RETRY:
  2217. case REC_NO_RESPONSE:
  2218. default:
  2219. if (z90crypt.mask.st_count > 1)
  2220. rq_p->retcode =
  2221. -ERESTARTSYS;
  2222. else
  2223. rq_p->retcode = -ENODEV;
  2224. break;
  2225. }
  2226. rq_p->status[0] |= STAT_FAILED;
  2227. rq_p->audit[1] |= FP_AWAKENING;
  2228. atomic_set(&rq_p->alarmrung, 1);
  2229. wake_up(&rq_p->waitq);
  2230. }
  2231. } else {
  2232. if (z90crypt.mask.st_count > 1)
  2233. rq_p->retcode = -ERESTARTSYS;
  2234. else
  2235. rq_p->retcode = -ENODEV;
  2236. rq_p->status[0] |= STAT_FAILED;
  2237. rq_p->audit[1] |= FP_AWAKENING;
  2238. atomic_set(&rq_p->alarmrung, 1);
  2239. wake_up(&rq_p->waitq);
  2240. }
  2241. }
  2242. static inline void
  2243. helper_handle_work_element(int index, unsigned char psmid[8], int rc,
  2244. int buff_len, unsigned char *buff,
  2245. unsigned char __user *resp_addr)
  2246. {
  2247. struct work_element *pq_p;
  2248. struct list_head *lptr, *tptr;
  2249. pq_p = 0;
  2250. list_for_each_safe(lptr, tptr, &pending_list) {
  2251. pq_p = list_entry(lptr, struct work_element, liste);
  2252. if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
  2253. list_del_init(lptr);
  2254. pendingq_count--;
  2255. pq_p->audit[1] |= FP_NOTPENDING;
  2256. break;
  2257. }
  2258. pq_p = 0;
  2259. }
  2260. if (!pq_p) {
  2261. PRINTK("device %d has work but no caller exists on pending Q\n",
  2262. SHRT2LONG(index));
  2263. return;
  2264. }
  2265. switch (rc) {
  2266. case 0:
  2267. pq_p->resp_buff_size = buff_len;
  2268. pq_p->audit[1] |= FP_RESPSIZESET;
  2269. if (buff_len) {
  2270. pq_p->resp_addr = resp_addr;
  2271. pq_p->audit[1] |= FP_RESPADDRCOPIED;
  2272. memcpy(pq_p->resp_buff, buff, buff_len);
  2273. pq_p->audit[1] |= FP_RESPBUFFCOPIED;
  2274. }
  2275. break;
  2276. case REC_OPERAND_INV:
  2277. case REC_OPERAND_SIZE:
  2278. case REC_EVEN_MOD:
  2279. case REC_INVALID_PAD:
  2280. PDEBUG("-EINVAL after application error %d\n", rc);
  2281. pq_p->retcode = -EINVAL;
  2282. pq_p->status[0] |= STAT_FAILED;
  2283. break;
  2284. case REC_USE_PCICA:
  2285. pq_p->retcode = -ERESTARTSYS;
  2286. pq_p->status[0] |= STAT_FAILED;
  2287. break;
  2288. case REC_NO_RESPONSE:
  2289. default:
  2290. if (z90crypt.mask.st_count > 1)
  2291. pq_p->retcode = -ERESTARTSYS;
  2292. else
  2293. pq_p->retcode = -ENODEV;
  2294. pq_p->status[0] |= STAT_FAILED;
  2295. break;
  2296. }
  2297. if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
  2298. pq_p->audit[1] |= FP_AWAKENING;
  2299. atomic_set(&pq_p->alarmrung, 1);
  2300. wake_up(&pq_p->waitq);
  2301. }
  2302. }
  2303. /**
  2304. * return TRUE if the work element should be removed from the queue
  2305. */
  2306. static inline int
  2307. helper_receive_rc(int index, int *rc_p)
  2308. {
  2309. switch (*rc_p) {
  2310. case 0:
  2311. case REC_OPERAND_INV:
  2312. case REC_OPERAND_SIZE:
  2313. case REC_EVEN_MOD:
  2314. case REC_INVALID_PAD:
  2315. case REC_USE_PCICA:
  2316. break;
  2317. case REC_BUSY:
  2318. case REC_NO_WORK:
  2319. case REC_EMPTY:
  2320. case REC_RETRY_DEV:
  2321. case REC_FATAL_ERROR:
  2322. return 0;
  2323. case REC_NO_RESPONSE:
  2324. break;
  2325. default:
  2326. PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
  2327. *rc_p, SHRT2LONG(index));
  2328. *rc_p = REC_NO_RESPONSE;
  2329. break;
  2330. }
  2331. return 1;
  2332. }
  2333. static inline void
  2334. z90crypt_schedule_reader_timer(void)
  2335. {
  2336. if (timer_pending(&reader_timer))
  2337. return;
  2338. if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
  2339. PRINTK("Timer pending while modifying reader timer\n");
  2340. }
  2341. static void
  2342. z90crypt_reader_task(unsigned long ptr)
  2343. {
  2344. int workavail, index, rc, buff_len;
  2345. unsigned char psmid[8];
  2346. unsigned char __user *resp_addr;
  2347. static unsigned char buff[1024];
  2348. /**
  2349. * we use workavail = 2 to ensure 2 passes with nothing dequeued before
  2350. * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
  2351. * loop, there is no work remaining on the queues.
  2352. */
  2353. resp_addr = 0;
  2354. workavail = 2;
  2355. buff_len = 0;
  2356. while (workavail) {
  2357. workavail--;
  2358. rc = 0;
  2359. spin_lock_irq(&queuespinlock);
  2360. memset(buff, 0x00, sizeof(buff));
  2361. /* Dequeue once from each device in round robin. */
  2362. for (index = 0; index < z90crypt.mask.st_count; index++) {
  2363. PDEBUG("About to receive.\n");
  2364. rc = receive_from_crypto_device(SHRT2LONG(index),
  2365. psmid,
  2366. &buff_len,
  2367. buff,
  2368. &resp_addr);
  2369. PDEBUG("Dequeued: rc = %d.\n", rc);
  2370. if (helper_receive_rc(index, &rc)) {
  2371. if (rc != REC_NO_RESPONSE) {
  2372. helper_send_work(index);
  2373. workavail = 2;
  2374. }
  2375. helper_handle_work_element(index, psmid, rc,
  2376. buff_len, buff,
  2377. resp_addr);
  2378. }
  2379. if (rc == REC_FATAL_ERROR)
  2380. PRINTKW("REC_FATAL_ERROR from device %d!\n",
  2381. SHRT2LONG(index));
  2382. }
  2383. spin_unlock_irq(&queuespinlock);
  2384. }
  2385. if (pendingq_count + requestq_count)
  2386. z90crypt_schedule_reader_timer();
  2387. }
  2388. static inline void
  2389. z90crypt_schedule_config_task(unsigned int expiration)
  2390. {
  2391. if (timer_pending(&config_timer))
  2392. return;
  2393. if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
  2394. PRINTK("Timer pending while modifying config timer\n");
  2395. }
  2396. static void
  2397. z90crypt_config_task(unsigned long ptr)
  2398. {
  2399. int rc;
  2400. PDEBUG("jiffies %ld\n", jiffies);
  2401. if ((rc = refresh_z90crypt(&z90crypt.cdx)))
  2402. PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
  2403. /* If return was fatal, don't bother reconfiguring */
  2404. if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
  2405. z90crypt_schedule_config_task(CONFIGTIME);
  2406. }
  2407. static inline void
  2408. z90crypt_schedule_cleanup_task(void)
  2409. {
  2410. if (timer_pending(&cleanup_timer))
  2411. return;
  2412. if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
  2413. PRINTK("Timer pending while modifying cleanup timer\n");
  2414. }
  2415. static inline void
  2416. helper_drain_queues(void)
  2417. {
  2418. struct work_element *pq_p;
  2419. struct list_head *lptr, *tptr;
  2420. list_for_each_safe(lptr, tptr, &pending_list) {
  2421. pq_p = list_entry(lptr, struct work_element, liste);
  2422. pq_p->retcode = -ENODEV;
  2423. pq_p->status[0] |= STAT_FAILED;
  2424. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2425. (struct caller *)pq_p->requestptr);
  2426. list_del_init(lptr);
  2427. pendingq_count--;
  2428. pq_p->audit[1] |= FP_NOTPENDING;
  2429. pq_p->audit[1] |= FP_AWAKENING;
  2430. atomic_set(&pq_p->alarmrung, 1);
  2431. wake_up(&pq_p->waitq);
  2432. }
  2433. list_for_each_safe(lptr, tptr, &request_list) {
  2434. pq_p = list_entry(lptr, struct work_element, liste);
  2435. pq_p->retcode = -ENODEV;
  2436. pq_p->status[0] |= STAT_FAILED;
  2437. list_del_init(lptr);
  2438. requestq_count--;
  2439. pq_p->audit[1] |= FP_REMREQUEST;
  2440. pq_p->audit[1] |= FP_AWAKENING;
  2441. atomic_set(&pq_p->alarmrung, 1);
  2442. wake_up(&pq_p->waitq);
  2443. }
  2444. }
  2445. static inline void
  2446. helper_timeout_requests(void)
  2447. {
  2448. struct work_element *pq_p;
  2449. struct list_head *lptr, *tptr;
  2450. long timelimit;
  2451. timelimit = jiffies - (CLEANUPTIME * HZ);
  2452. /* The list is in strict chronological order */
  2453. list_for_each_safe(lptr, tptr, &pending_list) {
  2454. pq_p = list_entry(lptr, struct work_element, liste);
  2455. if (pq_p->requestsent >= timelimit)
  2456. break;
  2457. PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2458. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2459. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2460. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2461. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2462. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2463. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2464. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2465. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2466. pq_p->retcode = -ETIMEOUT;
  2467. pq_p->status[0] |= STAT_FAILED;
  2468. /* get this off any caller queue it may be on */
  2469. unbuild_caller(LONG2DEVPTR(pq_p->devindex),
  2470. (struct caller *) pq_p->requestptr);
  2471. list_del_init(lptr);
  2472. pendingq_count--;
  2473. pq_p->audit[1] |= FP_TIMEDOUT;
  2474. pq_p->audit[1] |= FP_NOTPENDING;
  2475. pq_p->audit[1] |= FP_AWAKENING;
  2476. atomic_set(&pq_p->alarmrung, 1);
  2477. wake_up(&pq_p->waitq);
  2478. }
  2479. /**
  2480. * If pending count is zero, items left on the request queue may
  2481. * never be processed.
  2482. */
  2483. if (pendingq_count <= 0) {
  2484. list_for_each_safe(lptr, tptr, &request_list) {
  2485. pq_p = list_entry(lptr, struct work_element, liste);
  2486. if (pq_p->requestsent >= timelimit)
  2487. break;
  2488. PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
  2489. ((struct caller *)pq_p->requestptr)->caller_id[0],
  2490. ((struct caller *)pq_p->requestptr)->caller_id[1],
  2491. ((struct caller *)pq_p->requestptr)->caller_id[2],
  2492. ((struct caller *)pq_p->requestptr)->caller_id[3],
  2493. ((struct caller *)pq_p->requestptr)->caller_id[4],
  2494. ((struct caller *)pq_p->requestptr)->caller_id[5],
  2495. ((struct caller *)pq_p->requestptr)->caller_id[6],
  2496. ((struct caller *)pq_p->requestptr)->caller_id[7]);
  2497. pq_p->retcode = -ETIMEOUT;
  2498. pq_p->status[0] |= STAT_FAILED;
  2499. list_del_init(lptr);
  2500. requestq_count--;
  2501. pq_p->audit[1] |= FP_TIMEDOUT;
  2502. pq_p->audit[1] |= FP_REMREQUEST;
  2503. pq_p->audit[1] |= FP_AWAKENING;
  2504. atomic_set(&pq_p->alarmrung, 1);
  2505. wake_up(&pq_p->waitq);
  2506. }
  2507. }
  2508. }
  2509. static void
  2510. z90crypt_cleanup_task(unsigned long ptr)
  2511. {
  2512. PDEBUG("jiffies %ld\n", jiffies);
  2513. spin_lock_irq(&queuespinlock);
  2514. if (z90crypt.mask.st_count <= 0) // no devices!
  2515. helper_drain_queues();
  2516. else
  2517. helper_timeout_requests();
  2518. spin_unlock_irq(&queuespinlock);
  2519. z90crypt_schedule_cleanup_task();
  2520. }
  2521. static void
  2522. z90crypt_schedule_reader_task(unsigned long ptr)
  2523. {
  2524. tasklet_schedule(&reader_tasklet);
  2525. }
  2526. /**
  2527. * Lowlevel Functions:
  2528. *
  2529. * create_z90crypt: creates and initializes basic data structures
  2530. * refresh_z90crypt: re-initializes basic data structures
  2531. * find_crypto_devices: returns a count and mask of hardware status
  2532. * create_crypto_device: builds the descriptor for a device
  2533. * destroy_crypto_device: unallocates the descriptor for a device
  2534. * destroy_z90crypt: drains all work, unallocates structs
  2535. */
  2536. /**
  2537. * build the z90crypt root structure using the given domain index
  2538. */
  2539. static int
  2540. create_z90crypt(int *cdx_p)
  2541. {
  2542. struct hdware_block *hdware_blk_p;
  2543. memset(&z90crypt, 0x00, sizeof(struct z90crypt));
  2544. z90crypt.domain_established = 0;
  2545. z90crypt.len = sizeof(struct z90crypt);
  2546. z90crypt.max_count = Z90CRYPT_NUM_DEVS;
  2547. z90crypt.cdx = *cdx_p;
  2548. hdware_blk_p = (struct hdware_block *)
  2549. kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
  2550. if (!hdware_blk_p) {
  2551. PDEBUG("kmalloc for hardware block failed\n");
  2552. return ENOMEM;
  2553. }
  2554. memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
  2555. z90crypt.hdware_info = hdware_blk_p;
  2556. return 0;
  2557. }
  2558. static inline int
  2559. helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
  2560. {
  2561. enum hdstat hd_stat;
  2562. int q_depth, dev_type;
  2563. int indx, chkdom, numdomains;
  2564. q_depth = dev_type = numdomains = 0;
  2565. for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
  2566. for (indx = 0; indx < z90crypt.max_count; indx++) {
  2567. hd_stat = HD_NOT_THERE;
  2568. numdomains = 0;
  2569. for (chkdom = 0; chkdom <= 15; chkdom++) {
  2570. hd_stat = query_online(indx, chkdom, MAX_RESET,
  2571. &q_depth, &dev_type);
  2572. if (hd_stat == HD_TSQ_EXCEPTION) {
  2573. z90crypt.terminating = 1;
  2574. PRINTKC("exception taken!\n");
  2575. break;
  2576. }
  2577. if (hd_stat == HD_ONLINE) {
  2578. cdx_array[numdomains++] = chkdom;
  2579. if (*cdx_p == chkdom) {
  2580. *correct_cdx_found = 1;
  2581. break;
  2582. }
  2583. }
  2584. }
  2585. if ((*correct_cdx_found == 1) || (numdomains != 0))
  2586. break;
  2587. if (z90crypt.terminating)
  2588. break;
  2589. }
  2590. return numdomains;
  2591. }
  2592. static inline int
  2593. probe_crypto_domain(int *cdx_p)
  2594. {
  2595. int cdx_array[16];
  2596. char cdx_array_text[53], temp[5];
  2597. int correct_cdx_found, numdomains;
  2598. correct_cdx_found = 0;
  2599. numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
  2600. if (z90crypt.terminating)
  2601. return TSQ_FATAL_ERROR;
  2602. if (correct_cdx_found)
  2603. return 0;
  2604. if (numdomains == 0) {
  2605. PRINTKW("Unable to find crypto domain: No devices found\n");
  2606. return Z90C_NO_DEVICES;
  2607. }
  2608. if (numdomains == 1) {
  2609. if (*cdx_p == -1) {
  2610. *cdx_p = cdx_array[0];
  2611. return 0;
  2612. }
  2613. PRINTKW("incorrect domain: specified = %d, found = %d\n",
  2614. *cdx_p, cdx_array[0]);
  2615. return Z90C_INCORRECT_DOMAIN;
  2616. }
  2617. numdomains--;
  2618. sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
  2619. while (numdomains) {
  2620. numdomains--;
  2621. sprintf(temp, ", %d", cdx_array[numdomains]);
  2622. strcat(cdx_array_text, temp);
  2623. }
  2624. PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
  2625. *cdx_p, cdx_array_text);
  2626. return Z90C_AMBIGUOUS_DOMAIN;
  2627. }
  2628. static int
  2629. refresh_z90crypt(int *cdx_p)
  2630. {
  2631. int i, j, indx, rv;
  2632. static struct status local_mask;
  2633. struct device *devPtr;
  2634. unsigned char oldStat, newStat;
  2635. int return_unchanged;
  2636. if (z90crypt.len != sizeof(z90crypt))
  2637. return ENOTINIT;
  2638. if (z90crypt.terminating)
  2639. return TSQ_FATAL_ERROR;
  2640. rv = 0;
  2641. if (!z90crypt.hdware_info->hdware_mask.st_count &&
  2642. !z90crypt.domain_established) {
  2643. rv = probe_crypto_domain(cdx_p);
  2644. if (z90crypt.terminating)
  2645. return TSQ_FATAL_ERROR;
  2646. if (rv == Z90C_NO_DEVICES)
  2647. return 0; // try later
  2648. if (rv)
  2649. return rv;
  2650. z90crypt.cdx = *cdx_p;
  2651. z90crypt.domain_established = 1;
  2652. }
  2653. rv = find_crypto_devices(&local_mask);
  2654. if (rv) {
  2655. PRINTK("find crypto devices returned %d\n", rv);
  2656. return rv;
  2657. }
  2658. if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
  2659. sizeof(struct status))) {
  2660. return_unchanged = 1;
  2661. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
  2662. /**
  2663. * Check for disabled cards. If any device is marked
  2664. * disabled, destroy it.
  2665. */
  2666. for (j = 0;
  2667. j < z90crypt.hdware_info->type_mask[i].st_count;
  2668. j++) {
  2669. indx = z90crypt.hdware_info->type_x_addr[i].
  2670. device_index[j];
  2671. devPtr = z90crypt.device_p[indx];
  2672. if (devPtr && devPtr->disabled) {
  2673. local_mask.st_mask[indx] = HD_NOT_THERE;
  2674. return_unchanged = 0;
  2675. }
  2676. }
  2677. }
  2678. if (return_unchanged == 1)
  2679. return 0;
  2680. }
  2681. spin_lock_irq(&queuespinlock);
  2682. for (i = 0; i < z90crypt.max_count; i++) {
  2683. oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
  2684. newStat = local_mask.st_mask[i];
  2685. if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
  2686. destroy_crypto_device(i);
  2687. else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
  2688. rv = create_crypto_device(i);
  2689. if (rv >= REC_FATAL_ERROR)
  2690. return rv;
  2691. if (rv != 0) {
  2692. local_mask.st_mask[i] = HD_NOT_THERE;
  2693. local_mask.st_count--;
  2694. }
  2695. }
  2696. }
  2697. memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
  2698. sizeof(local_mask.st_mask));
  2699. z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
  2700. z90crypt.hdware_info->hdware_mask.disabled_count =
  2701. local_mask.disabled_count;
  2702. refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
  2703. for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
  2704. refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
  2705. &(z90crypt.hdware_info->type_x_addr[i]));
  2706. spin_unlock_irq(&queuespinlock);
  2707. return rv;
  2708. }
  2709. static int
  2710. find_crypto_devices(struct status *deviceMask)
  2711. {
  2712. int i, q_depth, dev_type;
  2713. enum hdstat hd_stat;
  2714. deviceMask->st_count = 0;
  2715. deviceMask->disabled_count = 0;
  2716. deviceMask->user_disabled_count = 0;
  2717. for (i = 0; i < z90crypt.max_count; i++) {
  2718. hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
  2719. &dev_type);
  2720. if (hd_stat == HD_TSQ_EXCEPTION) {
  2721. z90crypt.terminating = 1;
  2722. PRINTKC("Exception during probe for crypto devices\n");
  2723. return TSQ_FATAL_ERROR;
  2724. }
  2725. deviceMask->st_mask[i] = hd_stat;
  2726. if (hd_stat == HD_ONLINE) {
  2727. PDEBUG("Got an online crypto!: %d\n", i);
  2728. PDEBUG("Got a queue depth of %d\n", q_depth);
  2729. PDEBUG("Got a device type of %d\n", dev_type);
  2730. if (q_depth <= 0)
  2731. return TSQ_FATAL_ERROR;
  2732. deviceMask->st_count++;
  2733. z90crypt.q_depth_array[i] = q_depth;
  2734. z90crypt.dev_type_array[i] = dev_type;
  2735. }
  2736. }
  2737. return 0;
  2738. }
  2739. static int
  2740. refresh_index_array(struct status *status_str, struct device_x *index_array)
  2741. {
  2742. int i, count;
  2743. enum devstat stat;
  2744. i = -1;
  2745. count = 0;
  2746. do {
  2747. stat = status_str->st_mask[++i];
  2748. if (stat == DEV_ONLINE)
  2749. index_array->device_index[count++] = i;
  2750. } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
  2751. return count;
  2752. }
  2753. static int
  2754. create_crypto_device(int index)
  2755. {
  2756. int rv, devstat, total_size;
  2757. struct device *dev_ptr;
  2758. struct status *type_str_p;
  2759. int deviceType;
  2760. dev_ptr = z90crypt.device_p[index];
  2761. if (!dev_ptr) {
  2762. total_size = sizeof(struct device) +
  2763. z90crypt.q_depth_array[index] * sizeof(int);
  2764. dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
  2765. if (!dev_ptr) {
  2766. PRINTK("kmalloc device %d failed\n", index);
  2767. return ENOMEM;
  2768. }
  2769. memset(dev_ptr, 0, total_size);
  2770. dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
  2771. if (!dev_ptr->dev_resp_p) {
  2772. kfree(dev_ptr);
  2773. PRINTK("kmalloc device %d rec buffer failed\n", index);
  2774. return ENOMEM;
  2775. }
  2776. dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
  2777. INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
  2778. }
  2779. devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
  2780. if (devstat == DEV_RSQ_EXCEPTION) {
  2781. PRINTK("exception during reset device %d\n", index);
  2782. kfree(dev_ptr->dev_resp_p);
  2783. kfree(dev_ptr);
  2784. return RSQ_FATAL_ERROR;
  2785. }
  2786. if (devstat == DEV_ONLINE) {
  2787. dev_ptr->dev_self_x = index;
  2788. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2789. if (dev_ptr->dev_type == NILDEV) {
  2790. rv = probe_device_type(dev_ptr);
  2791. if (rv) {
  2792. PRINTK("rv = %d from probe_device_type %d\n",
  2793. rv, index);
  2794. kfree(dev_ptr->dev_resp_p);
  2795. kfree(dev_ptr);
  2796. return rv;
  2797. }
  2798. }
  2799. if (dev_ptr->dev_type == PCIXCC_UNK) {
  2800. rv = probe_PCIXCC_type(dev_ptr);
  2801. if (rv) {
  2802. PRINTK("rv = %d from probe_PCIXCC_type %d\n",
  2803. rv, index);
  2804. kfree(dev_ptr->dev_resp_p);
  2805. kfree(dev_ptr);
  2806. return rv;
  2807. }
  2808. }
  2809. deviceType = dev_ptr->dev_type;
  2810. z90crypt.dev_type_array[index] = deviceType;
  2811. if (deviceType == PCICA)
  2812. z90crypt.hdware_info->device_type_array[index] = 1;
  2813. else if (deviceType == PCICC)
  2814. z90crypt.hdware_info->device_type_array[index] = 2;
  2815. else if (deviceType == PCIXCC_MCL2)
  2816. z90crypt.hdware_info->device_type_array[index] = 3;
  2817. else if (deviceType == PCIXCC_MCL3)
  2818. z90crypt.hdware_info->device_type_array[index] = 4;
  2819. else if (deviceType == CEX2C)
  2820. z90crypt.hdware_info->device_type_array[index] = 5;
  2821. else
  2822. z90crypt.hdware_info->device_type_array[index] = -1;
  2823. }
  2824. /**
  2825. * 'q_depth' returned by the hardware is one less than
  2826. * the actual depth
  2827. */
  2828. dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
  2829. dev_ptr->dev_type = z90crypt.dev_type_array[index];
  2830. dev_ptr->dev_stat = devstat;
  2831. dev_ptr->disabled = 0;
  2832. z90crypt.device_p[index] = dev_ptr;
  2833. if (devstat == DEV_ONLINE) {
  2834. if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
  2835. z90crypt.mask.st_mask[index] = DEV_ONLINE;
  2836. z90crypt.mask.st_count++;
  2837. }
  2838. deviceType = dev_ptr->dev_type;
  2839. type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
  2840. if (type_str_p->st_mask[index] != DEV_ONLINE) {
  2841. type_str_p->st_mask[index] = DEV_ONLINE;
  2842. type_str_p->st_count++;
  2843. }
  2844. }
  2845. return 0;
  2846. }
  2847. static int
  2848. destroy_crypto_device(int index)
  2849. {
  2850. struct device *dev_ptr;
  2851. int t, disabledFlag;
  2852. dev_ptr = z90crypt.device_p[index];
  2853. /* remember device type; get rid of device struct */
  2854. if (dev_ptr) {
  2855. disabledFlag = dev_ptr->disabled;
  2856. t = dev_ptr->dev_type;
  2857. if (dev_ptr->dev_resp_p)
  2858. kfree(dev_ptr->dev_resp_p);
  2859. kfree(dev_ptr);
  2860. } else {
  2861. disabledFlag = 0;
  2862. t = -1;
  2863. }
  2864. z90crypt.device_p[index] = 0;
  2865. /* if the type is valid, remove the device from the type_mask */
  2866. if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
  2867. z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
  2868. z90crypt.hdware_info->type_mask[t].st_count--;
  2869. if (disabledFlag == 1)
  2870. z90crypt.hdware_info->type_mask[t].disabled_count--;
  2871. }
  2872. if (z90crypt.mask.st_mask[index] != DEV_GONE) {
  2873. z90crypt.mask.st_mask[index] = DEV_GONE;
  2874. z90crypt.mask.st_count--;
  2875. }
  2876. z90crypt.hdware_info->device_type_array[index] = 0;
  2877. return 0;
  2878. }
  2879. static void
  2880. destroy_z90crypt(void)
  2881. {
  2882. int i;
  2883. for (i = 0; i < z90crypt.max_count; i++)
  2884. if (z90crypt.device_p[i])
  2885. destroy_crypto_device(i);
  2886. if (z90crypt.hdware_info)
  2887. kfree((void *)z90crypt.hdware_info);
  2888. memset((void *)&z90crypt, 0, sizeof(z90crypt));
  2889. }
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};
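
/*
 * Queue the static test message on the AP device at devPtr->dev_self_x and
 * poll up to six times (300 ms apart) for the reply.  A reply starting with
 * 0x00 0x86 marks the card as a PCICC; any other reply is treated as a
 * PCICA.  Returns 0 on success or a SEN_/REC_ error code on failure.
 */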
static int
probe_device_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[sizeof(static_testmsg)];

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}

		if (rv)
			break;

		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}
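
/*
 * Pre-built CPRBX-format test request sent by probe_PCIXCC_type() to work
 * out whether a PCIXCC card is running MCL2 or MCL3 microcode.
 */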
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
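
/*
 * Queue the MCL3 test message on the AP device and poll for the reply.  A
 * CPRBX return code of 8 with reason code 33 identifies an MCL2-level card;
 * any other reply is taken to mean MCL3.  Returns 0 on success or a
 * SEN_/REC_ error code on failure.
 */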
static int
probe_PCIXCC_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[548];
	struct CPRBX *cprbx_p;

	index = devPtr->dev_self_x;
	rv = 0;
	do {
		memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
		length = sizeof(MCL3_testmsg) - 0x0C;
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned = %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}

		if (rv)
			break;

		cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
		if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
			devPtr->dev_type = PCIXCC_MCL2;
			PDEBUG("device %d is MCL2\n", index);
		} else {
			devPtr->dev_type = PCIXCC_MCL3;
			PDEBUG("device %d is MCL3\n", index);
		}
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}

#ifdef Z90CRYPT_USE_HOTPLUG
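/*
 * Notify user space that the z90crypt device node has been added or removed
 * by invoking the helper named by hotplug_path.  Compiled in only when
 * Z90CRYPT_USE_HOTPLUG is defined; does nothing unless CONFIG_HOTPLUG is
 * also enabled.
 */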
static void
z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
{
#ifdef CONFIG_HOTPLUG
	char *argv[3];
	char *envp[6];
	char major[20];
	char minor[20];

	sprintf(major, "MAJOR=%d", dev_major);
	sprintf(minor, "MINOR=%d", dev_minor);

	argv[0] = hotplug_path;
	argv[1] = "z90crypt";
	argv[2] = 0;

	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";

	switch (action) {
	case Z90CRYPT_HOTPLUG_ADD:
		envp[2] = "ACTION=add";
		break;
	case Z90CRYPT_HOTPLUG_REMOVE:
		envp[2] = "ACTION=remove";
		break;
	default:
		BUG();
		break;
	}
	envp[3] = major;
	envp[4] = minor;
	envp[5] = 0;

	call_usermodehelper(argv[0], argv, envp, 0);
#endif
}
#endif

module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);