/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_SLEEPON_START_TAG  (void *) 1
#define DASD_SLEEPON_END_TAG    (void *) 2

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);
        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        device->timer.function = dasd_device_timeout;
        device->timer.data = (unsigned long) device;
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        INIT_WORK(&device->reload_device, do_reload_device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;
        mutex_init(&device->state_mutex);
        spin_lock_init(&device->profile.lock);
        return device;
}
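/*
 * Editorial note (not part of the original file): dasd_alloc_device()
 * and dasd_free_device() are expected to be used as a pair. A minimal
 * hypothetical caller might look like this:
 *
 *      struct dasd_device *device = dasd_alloc_device();
 *
 *      if (IS_ERR(device))
 *              return PTR_ERR(device);
 *      ...
 *      dasd_free_device(device);
 *
 * Errors are reported through ERR_PTR()/PTR_ERR() rather than NULL, so
 * callers must test with IS_ERR() and not with a plain NULL check.
 */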
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
        struct dasd_block *block;

        block = kzalloc(sizeof(*block), GFP_ATOMIC);
        if (!block)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&block->open_count, -1);

        spin_lock_init(&block->request_queue_lock);
        atomic_set(&block->tasklet_scheduled, 0);
        tasklet_init(&block->tasklet,
                     (void (*)(unsigned long)) dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
        init_timer(&block->timer);
        block->timer.function = dasd_block_timeout;
        block->timer.data = (unsigned long) block;
        spin_lock_init(&block->profile.lock);

        return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
        kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        if (device->block) {
                rc = dasd_alloc_queue(device->block);
                if (rc) {
                        dasd_put_device(device);
                        return rc;
                }
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
        /* Disable extended error reporting for this device. */
        dasd_eer_disable(device);
        /* Forget the discipline information. */
        if (device->discipline) {
                if (device->discipline->uncheck_device)
                        device->discipline->uncheck_device(device);
                module_put(device->discipline->owner);
        }
        device->discipline = NULL;
        if (device->base_discipline)
                module_put(device->base_discipline->owner);
        device->base_discipline = NULL;
        device->state = DASD_STATE_NEW;

        if (device->block)
                dasd_free_queue(device->block);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
        return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
                                         struct dentry *base_dentry)
{
        struct dentry *pde;

        if (!base_dentry)
                return NULL;
        pde = debugfs_create_dir(name, base_dentry);
        if (!pde || IS_ERR(pde))
                return NULL;
        return pde;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
        struct dasd_block *block = device->block;
        int rc;

        /* Allocate and register gendisk structure. */
        if (block) {
                rc = dasd_gendisk_alloc(block);
                if (rc)
                        return rc;
                block->debugfs_dentry =
                        dasd_debugfs_setup(block->gdp->disk_name,
                                           dasd_debugfs_root_entry);
                dasd_profile_init(&block->profile, block->debugfs_dentry);
                if (dasd_global_profile_level == DASD_PROFILE_ON)
                        dasd_profile_on(&device->block->profile);
        }
        device->debugfs_dentry =
                dasd_debugfs_setup(dev_name(&device->cdev->dev),
                                   dasd_debugfs_root_entry);
        dasd_profile_init(&device->profile, device->debugfs_dentry);

        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
                                            8 * sizeof(long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_WARNING);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
        int rc;

        if (device->block) {
                dasd_profile_exit(&device->block->profile);
                if (device->block->debugfs_dentry)
                        debugfs_remove(device->block->debugfs_dentry);
                dasd_gendisk_free(device->block);
                dasd_block_clear_timer(device->block);
        }
        rc = dasd_flush_device_queue(device);
        if (rc)
                return rc;
        dasd_device_clear_timer(device);
        dasd_profile_exit(&device->profile);
        if (device->debugfs_dentry)
                debugfs_remove(device->debugfs_dentry);

        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
        int rc;
        struct dasd_block *block;

        rc = 0;
        block = device->block;
        /* make disk known with correct capacity */
        if (block) {
                if (block->base->discipline->do_analysis != NULL)
                        rc = block->base->discipline->do_analysis(block);
                if (rc) {
                        if (rc != -EAGAIN)
                                device->state = DASD_STATE_UNFMT;
                        return rc;
                }
                dasd_setup_queue(block);
                set_capacity(block->gdp,
                             block->blocks << block->s2b_shift);
                device->state = DASD_STATE_READY;
                rc = dasd_scan_partitions(block);
                if (rc)
                        device->state = DASD_STATE_BASIC;
        } else {
                device->state = DASD_STATE_READY;
        }
        return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and, if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
        int rc;

        device->state = DASD_STATE_BASIC;
        if (device->block) {
                struct dasd_block *block = device->block;

                rc = dasd_flush_block_queue(block);
                if (rc) {
                        device->state = DASD_STATE_READY;
                        return rc;
                }
                dasd_flush_request_queue(block);
                dasd_destroy_partitions(block);
                block->blocks = 0;
                block->bp_block = 0;
                block->s2b_shift = 0;
        }
        return 0;
}
/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->ready_to_online) {
                rc = device->discipline->ready_to_online(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_ONLINE;
        if (device->block) {
                dasd_schedule_block_bh(device->block);
                if ((device->features & DASD_FEATURE_USERAW)) {
                        disk = device->block->gdp;
                        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
                        return 0;
                }
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_READY;
        if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                rc = dasd_state_online_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_ready_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_unfmt_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                rc = dasd_state_basic_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                rc = dasd_state_known_to_new(device);

        return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc == -EAGAIN)
                return;
        if (rc)
                device->target = device->state;

        if (device->state == device->target)
                wake_up(&dasd_init_waitq);

        /* let user-space know that the device status changed */
        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
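/*
 * Editorial note (not part of the original file): the transitions
 * implemented by dasd_increase_state() and dasd_decrease_state() above
 * form the following ladder, with UNFMT as a side branch entered when
 * the initial analysis finds an unformatted volume:
 *
 *      NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                          |
 *                          +--> UNFMT (can only go back down to BASIC;
 *                                      moving up returns -EPERM)
 */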
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

        mutex_lock(&device->state_mutex);
        dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to do_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}
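/*
 * Editorial note (not part of the original file): dasd_kick_device(),
 * dasd_reload_device() and dasd_restore_device() all follow the same
 * pattern: take a reference with dasd_get_device() before queueing the
 * work item, and let the work function drop it with dasd_put_device().
 * The reference keeps the device structure alive until the deferred
 * work has actually run.
 */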
/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  reload_device);

        device->discipline->reload(device);
        dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to do_reload_device to the kernel event daemon. */
        schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);

        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to do_restore_device to the kernel event daemon. */
        schedule_work(&device->restore_device);
}
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        mutex_lock(&device->state_mutex);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
        mutex_unlock(&device->state_mutex);
        dasd_put_device(device);
}
/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}
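/*
 * Editorial note (not part of the original file): dasd_enable_device()
 * blocks on dasd_init_waitq until the state machine reports completion;
 * the matching wake_up() is issued by dasd_change_state() once
 * device->state has reached device->target.
 */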
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data;
static struct dentry *dasd_global_profile_dentry;
static struct dentry *dasd_debugfs_global_entry;
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                               struct dasd_ccw_req *cqr,
                               struct request *req)
{
        struct list_head *l;
        unsigned int counter;
        struct dasd_device *device;

        /* count the length of the chanq for statistics */
        counter = 0;
        if (dasd_global_profile_level || block->profile.data)
                list_for_each(l, &block->ccw_queue)
                        if (++counter >= 31)
                                break;

        if (dasd_global_profile_level) {
                dasd_global_profile_data.dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        dasd_global_profile_data.dasd_read_nr_req[counter]++;
        }

        spin_lock(&block->profile.lock);
        if (block->profile.data) {
                block->profile.data->dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        block->profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&block->profile.lock);

        /*
         * We count the request for the start device, even though it may run on
         * some other device due to error recovery. This way we make sure that
         * we count each request only once.
         */
        device = cqr->startdev;
        if (device->profile.data) {
                counter = 1; /* request is not yet queued on the start device */
                list_for_each(l, &device->ccw_queue)
                        if (++counter >= 31)
                                break;
        }
        spin_lock(&device->profile.lock);
        if (device->profile.data) {
                device->profile.data->dasd_io_nr_req[counter]++;
                if (rq_data_dir(req) == READ)
                        device->profile.data->dasd_read_nr_req[counter]++;
        }
        spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index) \
{ \
        for (index = 0; index < 31 && value >> (2+index); index++) \
                ; \
}
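/*
 * Editorial note (not part of the original file): dasd_profile_counter()
 * computes a logarithmic histogram bucket: it yields the smallest index
 * for which value < 2^(index + 2), capped at 31. For example, value = 100
 * gives index = 5, because 100 >> 7 == 0 while 100 >> 6 == 1, i.e.
 * 64 <= 100 < 128.
 */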
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
                                      int is_alias,
                                      int is_tpm,
                                      int is_read,
                                      long sectors,
                                      int sectors_ind,
                                      int tottime_ind,
                                      int tottimeps_ind,
                                      int strtime_ind,
                                      int irqtime_ind,
                                      int irqtimeps_ind,
                                      int endtime_ind)
{
        /* in case of an overflow, reset the whole profile */
        if (data->dasd_io_reqs == UINT_MAX) {
                memset(data, 0, sizeof(*data));
                getnstimeofday(&data->starttod);
        }
        data->dasd_io_reqs++;
        data->dasd_io_sects += sectors;
        if (is_alias)
                data->dasd_io_alias++;
        if (is_tpm)
                data->dasd_io_tpm++;

        data->dasd_io_secs[sectors_ind]++;
        data->dasd_io_times[tottime_ind]++;
        data->dasd_io_timps[tottimeps_ind]++;
        data->dasd_io_time1[strtime_ind]++;
        data->dasd_io_time2[irqtime_ind]++;
        data->dasd_io_time2ps[irqtimeps_ind]++;
        data->dasd_io_time3[endtime_ind]++;

        if (is_read) {
                data->dasd_read_reqs++;
                data->dasd_read_sects += sectors;
                if (is_alias)
                        data->dasd_read_alias++;
                if (is_tpm)
                        data->dasd_read_tpm++;
                data->dasd_read_secs[sectors_ind]++;
                data->dasd_read_times[tottime_ind]++;
                data->dasd_read_time1[strtime_ind]++;
                data->dasd_read_time2[irqtime_ind]++;
                data->dasd_read_time3[endtime_ind]++;
        }
}
static void dasd_profile_end(struct dasd_block *block,
                             struct dasd_ccw_req *cqr,
                             struct request *req)
{
        long strtime, irqtime, endtime, tottime;  /* in microseconds */
        long tottimeps, sectors;
        struct dasd_device *device;
        int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
        int irqtime_ind, irqtimeps_ind, endtime_ind;

        device = cqr->startdev;
        if (!(dasd_global_profile_level ||
              block->profile.data ||
              device->profile.data))
                return;

        sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        dasd_profile_counter(sectors, sectors_ind);
        dasd_profile_counter(tottime, tottime_ind);
        dasd_profile_counter(tottimeps, tottimeps_ind);
        dasd_profile_counter(strtime, strtime_ind);
        dasd_profile_counter(irqtime, irqtime_ind);
        dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
        dasd_profile_counter(endtime, endtime_ind);

        if (dasd_global_profile_level) {
                dasd_profile_end_add_data(&dasd_global_profile_data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        }

        spin_lock(&block->profile.lock);
        if (block->profile.data)
                dasd_profile_end_add_data(block->profile.data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        spin_unlock(&block->profile.lock);

        spin_lock(&device->profile.lock);
        if (device->profile.data)
                dasd_profile_end_add_data(device->profile.data,
                                          cqr->startdev != block->base,
                                          cqr->cpmode == 1,
                                          rq_data_dir(req) == READ,
                                          sectors, sectors_ind, tottime_ind,
                                          tottimeps_ind, strtime_ind,
                                          irqtime_ind, irqtimeps_ind,
                                          endtime_ind);
        spin_unlock(&device->profile.lock);
}
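/*
 * Editorial note (not part of the original file): the clock values stored
 * in the cqr (buildclk, startclk, stopclk, endclk) are s390 TOD clock
 * values, in which bit 51 corresponds to one microsecond. Shifting the
 * differences right by 12 bits therefore converts them to microseconds,
 * which is why the local variables above are commented as such.
 */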
void dasd_profile_reset(struct dasd_profile *profile)
{
        struct dasd_profile_info *data;

        spin_lock_bh(&profile->lock);
        data = profile->data;
        if (!data) {
                spin_unlock_bh(&profile->lock);
                return;
        }
        memset(data, 0, sizeof(*data));
        getnstimeofday(&data->starttod);
        spin_unlock_bh(&profile->lock);
}

void dasd_global_profile_reset(void)
{
        memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
        getnstimeofday(&dasd_global_profile_data.starttod);
}

int dasd_profile_on(struct dasd_profile *profile)
{
        struct dasd_profile_info *data;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        spin_lock_bh(&profile->lock);
        if (profile->data) {
                spin_unlock_bh(&profile->lock);
                kfree(data);
                return 0;
        }
        getnstimeofday(&data->starttod);
        profile->data = data;
        spin_unlock_bh(&profile->lock);
        return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
        spin_lock_bh(&profile->lock);
        kfree(profile->data);
        profile->data = NULL;
        spin_unlock_bh(&profile->lock);
}
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
        char *buffer;

        buffer = vmalloc(user_len + 1);
        if (buffer == NULL)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(buffer, user_buf, user_len) != 0) {
                vfree(buffer);
                return ERR_PTR(-EFAULT);
        }
        /* got the string, now strip linefeed. */
        if (buffer[user_len - 1] == '\n')
                buffer[user_len - 1] = 0;
        else
                buffer[user_len] = 0;
        return buffer;
}
static ssize_t dasd_stats_write(struct file *file,
                                const char __user *user_buf,
                                size_t user_len, loff_t *pos)
{
        char *buffer, *str;
        int rc;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct dasd_profile *prof = m->private;

        if (user_len > 65536)
                user_len = 65536;
        buffer = dasd_get_user_string(user_buf, user_len);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        str = skip_spaces(buffer);
        rc = user_len;
        if (strncmp(str, "reset", 5) == 0) {
                dasd_profile_reset(prof);
        } else if (strncmp(str, "on", 2) == 0) {
                rc = dasd_profile_on(prof);
                if (!rc)
                        rc = user_len;
        } else if (strncmp(str, "off", 3) == 0) {
                dasd_profile_off(prof);
        } else
                rc = -EINVAL;
        vfree(buffer);
        return rc;
}
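/*
 * Editorial note (not part of the original file): with debugfs mounted at
 * the usual /sys/kernel/debug, the per-block and per-device statistics
 * files created below accept the commands parsed above, e.g.
 *
 *      echo on    > /sys/kernel/debug/dasd/dasda/statistics
 *      echo reset > /sys/kernel/debug/dasd/dasda/statistics
 *      echo off   > /sys/kernel/debug/dasd/dasda/statistics
 *
 * ("dasda" is a placeholder for the block device's disk name; device
 * directories are named after the ccw device instead.)
 */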
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
        int i;

        for (i = 0; i < 32; i++)
                seq_printf(m, "%u ", array[i]);
        seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
                                 struct dasd_profile_info *data)
{
        seq_printf(m, "start_time %ld.%09ld\n",
                   data->starttod.tv_sec, data->starttod.tv_nsec);
        seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
        seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
        seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
        seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
        seq_printf(m, "histogram_sectors ");
        dasd_stats_array(m, data->dasd_io_secs);
        seq_printf(m, "histogram_io_times ");
        dasd_stats_array(m, data->dasd_io_times);
        seq_printf(m, "histogram_io_times_weighted ");
        dasd_stats_array(m, data->dasd_io_timps);
        seq_printf(m, "histogram_time_build_to_ssch ");
        dasd_stats_array(m, data->dasd_io_time1);
        seq_printf(m, "histogram_time_ssch_to_irq ");
        dasd_stats_array(m, data->dasd_io_time2);
        seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
        dasd_stats_array(m, data->dasd_io_time2ps);
        seq_printf(m, "histogram_time_irq_to_end ");
        dasd_stats_array(m, data->dasd_io_time3);
        seq_printf(m, "histogram_ccw_queue_length ");
        dasd_stats_array(m, data->dasd_io_nr_req);
        seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
        seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
        seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
        seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
        seq_printf(m, "histogram_read_sectors ");
        dasd_stats_array(m, data->dasd_read_secs);
        seq_printf(m, "histogram_read_times ");
        dasd_stats_array(m, data->dasd_read_times);
        seq_printf(m, "histogram_read_time_build_to_ssch ");
        dasd_stats_array(m, data->dasd_read_time1);
        seq_printf(m, "histogram_read_time_ssch_to_irq ");
        dasd_stats_array(m, data->dasd_read_time2);
        seq_printf(m, "histogram_read_time_irq_to_end ");
        dasd_stats_array(m, data->dasd_read_time3);
        seq_printf(m, "histogram_read_ccw_queue_length ");
        dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
        struct dasd_profile *profile;
        struct dasd_profile_info *data;

        profile = m->private;
        spin_lock_bh(&profile->lock);
        data = profile->data;
        if (!data) {
                spin_unlock_bh(&profile->lock);
                seq_printf(m, "disabled\n");
                return 0;
        }
        dasd_stats_seq_print(m, data);
        spin_unlock_bh(&profile->lock);
        return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
        struct dasd_profile *profile = inode->i_private;

        return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
        .owner   = THIS_MODULE,
        .open    = dasd_stats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
        .write   = dasd_stats_write,
};
static ssize_t dasd_stats_global_write(struct file *file,
                                       const char __user *user_buf,
                                       size_t user_len, loff_t *pos)
{
        char *buffer, *str;
        ssize_t rc;

        if (user_len > 65536)
                user_len = 65536;
        buffer = dasd_get_user_string(user_buf, user_len);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
        str = skip_spaces(buffer);
        rc = user_len;
        if (strncmp(str, "reset", 5) == 0) {
                dasd_global_profile_reset();
        } else if (strncmp(str, "on", 2) == 0) {
                dasd_global_profile_reset();
                dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
        } else if (strncmp(str, "off", 3) == 0) {
                dasd_global_profile_level = DASD_PROFILE_OFF;
        } else
                rc = -EINVAL;
        vfree(buffer);
        return rc;
}

static int dasd_stats_global_show(struct seq_file *m, void *v)
{
        if (!dasd_global_profile_level) {
                seq_printf(m, "disabled\n");
                return 0;
        }
        dasd_stats_seq_print(m, &dasd_global_profile_data);
        return 0;
}

static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
        return single_open(file, dasd_stats_global_show, NULL);
}

static const struct file_operations dasd_stats_global_fops = {
        .owner   = THIS_MODULE,
        .open    = dasd_stats_global_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
        .write   = dasd_stats_global_write,
};
static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
{
        umode_t mode;
        struct dentry *pde;

        if (!base_dentry)
                return;
        profile->dentry = NULL;
        profile->data = NULL;
        mode = (S_IRUSR | S_IWUSR | S_IFREG);
        pde = debugfs_create_file("statistics", mode, base_dentry,
                                  profile, &dasd_stats_raw_fops);
        if (pde && !IS_ERR(pde))
                profile->dentry = pde;
        return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
        dasd_profile_off(profile);
        if (profile->dentry) {
                debugfs_remove(profile->dentry);
                profile->dentry = NULL;
        }
}

static void dasd_statistics_removeroot(void)
{
        dasd_global_profile_level = DASD_PROFILE_OFF;
        if (dasd_global_profile_dentry) {
                debugfs_remove(dasd_global_profile_dentry);
                dasd_global_profile_dentry = NULL;
        }
        if (dasd_debugfs_global_entry)
                debugfs_remove(dasd_debugfs_global_entry);
        if (dasd_debugfs_root_entry)
                debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
        umode_t mode;
        struct dentry *pde;

        dasd_debugfs_root_entry = NULL;
        dasd_debugfs_global_entry = NULL;
        dasd_global_profile_dentry = NULL;
        pde = debugfs_create_dir("dasd", NULL);
        if (!pde || IS_ERR(pde))
                goto error;
        dasd_debugfs_root_entry = pde;
        pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
        if (!pde || IS_ERR(pde))
                goto error;
        dasd_debugfs_global_entry = pde;
        mode = (S_IRUSR | S_IWUSR | S_IFREG);
        pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
                                  NULL, &dasd_stats_global_fops);
        if (!pde || IS_ERR(pde))
                goto error;
        dasd_global_profile_dentry = pde;
        return;

error:
        DBF_EVENT(DBF_ERR, "%s",
                  "Creation of the dasd debugfs interface failed");
        dasd_statistics_removeroot();
        return;
}
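/*
 * Editorial note (not part of the original file): assuming debugfs is
 * mounted at /sys/kernel/debug, the code above builds the following tree:
 *
 *      /sys/kernel/debug/dasd/
 *      |-- global/statistics              (dasd_stats_global_fops)
 *      |-- <ccw device name>/statistics   (per-device, dasd_stats_raw_fops)
 *      `-- <disk name>/statistics         (per-block, dasd_stats_raw_fops)
 *
 * The per-device and per-block directories are created later, in
 * dasd_state_known_to_basic() via dasd_debugfs_setup().
 */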
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
        return;
}

static void dasd_statistics_removeroot(void)
{
        return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
        seq_printf(m, "Statistics are not activated in this kernel\n");
        return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
{
        return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
        return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
        return 0;
}

#endif /* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        BUG_ON(datasize > PAGE_SIZE ||
               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        cqr->magic = magic;
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
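/*
 * Editorial note (not part of the original file): a hypothetical
 * discipline would pair the two allocators with the matching free
 * functions below, e.g.
 *
 *      cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *      if (IS_ERR(cqr))
 *              return PTR_ERR(cqr);
 *      ... build the channel program in cqr->cpaddr / cqr->data ...
 *      dasd_sfree_request(cqr, device);
 *
 * Requests from dasd_kmalloc_request() must be freed with
 * dasd_kfree_request(); the two schemes must not be mixed, since
 * smalloc requests live in the per-device chunk list while kmalloc
 * requests are individually kmalloc'd objects. Note that
 * (x + 7L) & -8L above simply rounds x up to a multiple of 8.
 */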
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        kfree(cqr->cpaddr);
        kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                              " dasd_ccw_req 0x%08x magic doesn't match"
                              " discipline 0x%08x",
                              cqr->magic,
                              *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}
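/*
 * Editorial note (not part of the original file): cqr->magic holds the
 * four-character discipline name, and the comparison above is done
 * against the EBCDIC form kept in discipline->ebcname, so the check
 * catches requests that were built for a different discipline than the
 * one now owning the start device.
 */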
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        /* internal error 10 - unknown rc*/
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
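/*
 * Illustrative note (not part of the driver): dasd_start_IO is assumed to
 * run with the ccw device lock held and the request at the head of the
 * device ccw queue; __dasd_device_start_head below shows the canonical
 * caller pattern:
 *
 *	rc = device->discipline->start_IO(cqr);
 *	if (rc == 0)
 *		dasd_device_set_timer(device, cqr->expires);
 *
 * A non-zero rc leaves the request queued (or in error) and relies on the
 * timer/tasklet machinery for the retry.
 */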
/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
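/*
 * Illustrative note (not part of the driver): "expires" is a relative
 * value in jiffies, so callers convert from seconds with HZ, e.g.
 *
 *	dasd_device_set_timer(device, 5 * HZ);	// fire in ~5 seconds
 *	dasd_device_set_timer(device, 0);	// same as clear_timer
 *
 * mod_timer() both arms an inactive timer and re-arms a pending one,
 * which is why no separate add_timer() path is needed here.
 */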
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();
	cqr = (struct dasd_ccw_req *) intparm;
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}
		device->discipline->dump_sense_dbf(device, irb, "int");
		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}
	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
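/*
 * Illustrative note (not part of the driver): the happy path through the
 * handler above requires channel end plus device end and a clean
 * subchannel status, i.e.
 *
 *	scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)
 *	scsw_cstat(&irb->scsw) == 0
 *
 * Everything else is treated as an error or unsolicited interrupt and is
 * funneled through dump_sense/check_for_device_change before the request
 * state machine decides between a fastpath retry and full ERP.
 */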
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}
/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}
/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer
	 * exception: only the disconnect or unresumed bits are set and the
	 * cqr is a path verification request
	 */
	if (device->stopped &&
	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
static void __dasd_device_check_path_events(struct dasd_device *device)
{
	int rc;

	if (device->path_data.tbvpm) {
		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
					DASD_UNRESUMED_PM))
			return;
		rc = device->discipline->verify_path(
				device, device->path_data.tbvpm);
		if (rc)
			dasd_device_set_timer(device, 50);
		else
			device->path_data.tbvpm = 0;
	}
}
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}
/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
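/*
 * Illustrative sketch (not part of the driver): a discipline issuing a
 * one-off synchronous channel program would typically combine the helpers
 * above along these lines:
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	// ... fill in cqr->cpaddr, cqr->retries, cqr->expires ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 *
 * dasd_sleep_on() returns 0, cqr->intrc, or -EIO, so the caller does not
 * need to inspect cqr->status itself.
 */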
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add new request as second
	 * first the terminated cqr needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 0 if the request was cancelled or did not need to be
 * terminated (not started yet), or a negative error code if
 * termination failed.
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
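/*
 * Illustrative note (not part of the driver): because cancellation is
 * asynchronous, a caller that wants synchronous semantics has to combine
 * dasd_cancel_req() with a wait, as _dasd_sleep_on() does on -ERESTARTSYS:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *
 * Only after the callback has run is the cqr guaranteed not to be touched
 * by the device tasklet anymore.
 */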
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably, (e.g. a state
 * change interrupt)
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}
/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while ((req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
/*
 * Schedules a call to dasd_block_tasklet over the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Configure limits for the request queue.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	if (block->base->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track
		 * so the max_hw_sectors are
		 * 2048 x 512B = 1024kB = 16 tracks
		 */
		max = 2048;
	} else {
		max = block->base->discipline->max_blocks << block->s2b_shift;
	}
	blk_queue_logical_block_size(block->request_queue,
				     block->bp_block);
	blk_queue_max_hw_sectors(block->request_queue, max);
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
}
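/*
 * Illustrative note (not part of the driver): in the non-raw case,
 * max_hw_sectors works out to max_blocks << s2b_shift 512-byte sectors.
 * For example, assuming a discipline max_blocks of 96 and a 4KB bp_block
 * (s2b_shift = 3):
 *
 *	max = 96 << 3;		// 768 sectors = 384 KB per request
 *
 * The PAGE_SIZE segment size/boundary pair ensures each segment maps to a
 * single idaw/tidaw, as the comment above notes.
 */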
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_device *base;
	int rc;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	dasd_put_device(base);
	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&base->block->open_count);
	dasd_put_device(base);
	return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_device *base;

	base = dasd_device_from_gendisk(disk);
	if (!base)
		return -ENODEV;

	atomic_dec(&base->block->open_count);
	module_put(base->discipline->owner);
	dasd_put_device(base);
	return 0;
}
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *base;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	if (!base->discipline ||
	    !base->discipline->fill_geometry) {
		dasd_put_device(base);
		return -EINVAL;
	}
	base->discipline->fill_geometry(base->block, geo);
	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
	dasd_put_device(base);
	return 0;
}

const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};
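/*
 * Illustrative note (not part of the driver): dasd_device_operations is
 * what the gendisk code hangs off a disk at allocation time, roughly
 *
 *	gdp->fops = &dasd_device_operations;	// e.g. in dasd_gendisk_alloc()
 *
 * so the open/release/ioctl/getgeo callbacks above are invoked by the
 * generic block layer, never directly from within this file.
 */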
/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
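/*
 * Illustrative note (not part of the driver): callers treat the result as
 * a plain boolean, e.g. discipline setup code can do something like
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 *
 * Bit 0x80 of vrdcvfla is assumed here to be the z/VM read-only flag
 * reported by DIAGNOSE 0x210.
 */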
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}
  2750. /*
  2751. * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
  2752. * the device is detected for the first time and is supposed to be used
  2753. * or the user has started activation through sysfs.
  2754. */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
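
/*
 * Illustrative call site (a hedged sketch, not code from this driver):
 * a discipline's set_online hook forwards to the generic helper with its
 * own discipline object, roughly:
 *
 *	static int dasd_xxx_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_xxx_discipline);
 *	}
 */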

int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, which includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * The life cycle of the block device is bound to the device, so
	 * delete it only after the device has been safely removed.
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
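
/*
 * Usage note (an assumption about the surrounding kernel plumbing, not
 * code in this file): set_online/set_offline are normally triggered from
 * user space through the common CCW "online" attribute, e.g.
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *
 * The bus id 0.0.1234 is only an example.
 */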

int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
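
/*
 * Background note (added for clarity, describing behaviour elsewhere in
 * this file): the stop bits set here gate request submission - the device
 * tasklet will not start new I/O while device->stopped is non-zero, so
 * the requeued requests stay in DASD_CQR_QUEUED until a path returns and
 * dasd_generic_path_operational() clears DASD_STOPPED_DC_WAIT again.
 */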

int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	if (device->stopped & DASD_UNRESUMED_PM) {
		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
		dasd_restore_device(device);
		return 1;
	}
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		device->path_data.opm = 0;
		device->path_data.ppm = 0;
		device->path_data.npm = 0;
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (device->path_data.opm)
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}
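
/*
 * Reader's note (summary, not new behaviour): CIO_GONE, CIO_BOXED and
 * CIO_NO_PATH all mean that no usable path is left, so all path masks
 * (opm/ppm/npm) are cleared; CIO_OPER means the device is operational
 * again, which only helps if at least one operational path (opm) is
 * already known.
 */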

void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	int chp;
	__u8 oldopm, eventlpm;
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;
	for (chp = 0; chp < 8; chp++) {
		eventlpm = 0x80 >> chp;
		if (path_event[chp] & PE_PATH_GONE) {
			oldopm = device->path_data.opm;
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			if (oldopm && !device->path_data.opm)
				dasd_generic_last_path_gone(device);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			device->path_data.tbvpm |= eventlpm;
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
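
/*
 * Worked example (for clarity): logical path masks are 8-bit values with
 * the most significant bit denoting channel path 0, so eventlpm above is
 *
 *	chp 0 -> 0x80 >> 0 = 0x80
 *	chp 3 -> 0x80 >> 3 = 0x10
 *	chp 7 -> 0x80 >> 7 = 0x01
 *
 * and clearing a path from a mask is a simple "mask &= ~eventlpm".
 */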

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!device->path_data.opm && lpm) {
		device->path_data.opm = lpm;
		dasd_generic_path_operational(device);
	} else
		device->path_data.opm |= lpm;
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);
	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to freeze_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}

	/* requeue the frozen requests on the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
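
/*
 * Usage note (an assumption about the wiring, based on the generic ccw
 * driver model rather than on code in this file): disciplines typically
 * install these two helpers as the power-management callbacks of their
 * struct ccw_driver, e.g. .freeze = dasd_generic_pm_freeze and
 * .thaw/.restore = dasd_generic_restore_device.
 */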

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
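
/*
 * Reader's note (summary of the IDAL branch above): a CCW carries only a
 * 31-bit data address, so when the buffer cannot be addressed directly
 * (idal_is_needed() is true), the CCW is pointed at a list of indirect
 * data address words (IDAWs) built by idal_create_words() and the
 * CCW_FLAG_IDA flag is set; otherwise the buffer address is used as-is.
 */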

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
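
/*
 * Illustrative caller (a hedged sketch; private->rdc_data and
 * DASD_XXX_MAGIC stand in for discipline-specific definitions): a
 * discipline reads the device characteristics into its private data
 * during device setup, roughly:
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_XXX_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Read device characteristics failed, rc=%d\n", rc);
 */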

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
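
/*
 * Illustrative use (a hedged sketch of the common pattern in interrupt
 * and error-recovery code; the sense-byte test is only an example):
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[0] & 0x80))
 *		handle_command_reject();   - placeholder for ERP handling
 *
 * The caller gets either a pointer to the 32-byte sense array or NULL
 * when no sense data is available, regardless of I/O mode.
 */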

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);