/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
        if (!device)
                return ERR_PTR(-ENOMEM);
        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (!device->ccw_mem) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (!device->erp_mem) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        atomic_set(&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        device->timer.function = dasd_device_timeout;
        device->timer.data = (unsigned long) device;
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;

        return device;
}
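
/*
 * Note: the two preallocated GFP_DMA pools above serve different
 * purposes. ccw_mem (two pages) backs dasd_smalloc_request() for
 * normal I/O, while erp_mem (one page) is kept as a separate chunk
 * pool, presumably so that error recovery can still build requests
 * when the normal pool is exhausted.
 */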
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
        struct dasd_block *block;

        block = kzalloc(sizeof(*block), GFP_ATOMIC);
        if (!block)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&block->open_count, -1);

        spin_lock_init(&block->request_queue_lock);
        atomic_set(&block->tasklet_scheduled, 0);
        tasklet_init(&block->tasklet,
                     (void (*)(unsigned long)) dasd_block_tasklet,
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
        init_timer(&block->timer);
        block->timer.function = dasd_block_timeout;
        block->timer.data = (unsigned long) block;

        return block;
}

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
        kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        if (device->block) {
                rc = dasd_alloc_queue(device->block);
                if (rc) {
                        dasd_put_device(device);
                        return rc;
                }
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
        /* Disable extended error reporting for this device. */
        dasd_eer_disable(device);
        /* Forget the discipline information. */
        if (device->discipline) {
                if (device->discipline->uncheck_device)
                        device->discipline->uncheck_device(device);
                module_put(device->discipline->owner);
        }
        device->discipline = NULL;
        if (device->base_discipline)
                module_put(device->base_discipline->owner);
        device->base_discipline = NULL;
        device->state = DASD_STATE_NEW;

        if (device->block)
                dasd_free_queue(device->block);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
        return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
        int rc;

        /* Allocate and register gendisk structure. */
        if (device->block) {
                rc = dasd_gendisk_alloc(device->block);
                if (rc)
                        return rc;
        }
        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
                                            8 * sizeof(long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_WARNING);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
        int rc;

        if (device->block) {
                dasd_gendisk_free(device->block);
                dasd_block_clear_timer(device->block);
        }
        rc = dasd_flush_device_queue(device);
        if (rc)
                return rc;
        dasd_device_clear_timer(device);

        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
        int rc;
        struct dasd_block *block;

        rc = 0;
        block = device->block;
        /* make disk known with correct capacity */
        if (block) {
                if (block->base->discipline->do_analysis != NULL)
                        rc = block->base->discipline->do_analysis(block);
                if (rc) {
                        if (rc != -EAGAIN)
                                device->state = DASD_STATE_UNFMT;
                        return rc;
                }
                dasd_setup_queue(block);
                set_capacity(block->gdp,
                             block->blocks << block->s2b_shift);
                device->state = DASD_STATE_READY;
                rc = dasd_scan_partitions(block);
                if (rc)
                        device->state = DASD_STATE_BASIC;
        } else {
                device->state = DASD_STATE_READY;
        }
        return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
        int rc;

        device->state = DASD_STATE_BASIC;
        if (device->block) {
                struct dasd_block *block = device->block;
                rc = dasd_flush_block_queue(block);
                if (rc) {
                        device->state = DASD_STATE_READY;
                        return rc;
                }
                dasd_destroy_partitions(block);
                dasd_flush_request_queue(block);
                block->blocks = 0;
                block->bp_block = 0;
                block->s2b_shift = 0;
        }
        return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->ready_to_online) {
                rc = device->discipline->ready_to_online(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_ONLINE;
        if (device->block) {
                dasd_schedule_block_bh(device->block);
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
        int rc;
        struct gendisk *disk;
        struct disk_part_iter piter;
        struct hd_struct *part;

        if (device->discipline->online_to_ready) {
                rc = device->discipline->online_to_ready(device);
                if (rc)
                        return rc;
        }
        device->state = DASD_STATE_READY;
        if (device->block) {
                disk = device->block->bdev->bd_disk;
                disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
                while ((part = disk_part_iter_next(&piter)))
                        kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
                disk_part_iter_exit(&piter);
        }
        return 0;
}
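
/*
 * Note: the state ladder driven by the two helpers below is
 * NEW -> KNOWN -> BASIC -> READY -> ONLINE, with UNFMT as a side
 * state entered when the analysis in dasd_state_basic_to_ready()
 * fails with an error other than -EAGAIN. An unformatted device
 * cannot be raised above UNFMT (-EPERM below); it can only drop
 * back to BASIC, e.g. for formatting.
 */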
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                rc = dasd_state_online_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_ready_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                rc = dasd_state_unfmt_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                rc = dasd_state_basic_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                rc = dasd_state_known_to_new(device);

        return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc && rc != -EAGAIN)
                device->target = device->state;

        if (device->state == device->target) {
                wake_up(&dasd_init_waitq);
                dasd_put_device(device);
        }

        /* let user-space know that the device status changed */
        kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  kick_work);
        dasd_change_state(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  restore_device);
        device->cdev->drv->restore(device->cdev);
        dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_restore_device to the kernel event daemon. */
        schedule_work(&device->restore_device);
}
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
        dasd_get_device(device);
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target) {
                        wake_up(&dasd_init_waitq);
                        dasd_put_device(device);
                }
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
}
/*
 * Enable a device and wait until the target state is reached.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
        int index; \
        for (index = 0; index < 31 && value >> (2+index); index++); \
        dasd_global_profile.counter[index]++; \
        block->profile.counter[index]++; \
}
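
/*
 * Note: the for loop above selects a logarithmic histogram bucket:
 * it stops at the first index for which value >> (2 + index) is zero,
 * so the bucket index grows roughly as log2(value) and each bucket
 * covers a power-of-two range, capped at index 31.
 */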
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                               struct dasd_ccw_req *cqr,
                               struct request *req)
{
        struct list_head *l;
        unsigned int counter;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        /* count the length of the chanq for statistics */
        counter = 0;
        list_for_each(l, &block->ccw_queue)
                if (++counter >= 31)
                        break;
        dasd_global_profile.dasd_io_nr_req[counter]++;
        block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
                             struct dasd_ccw_req *cqr,
                             struct request *req)
{
        long strtime, irqtime, endtime, tottime;        /* in microseconds */
        long tottimeps, sectors;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        sectors = blk_rq_sectors(req);
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;
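
        /*
         * Note: bit 51 of the s390 TOD clock corresponds to one
         * microsecond, so shifting the clock deltas right by 12 below
         * converts them from TOD units to microseconds.
         */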
        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        if (!dasd_global_profile.dasd_io_reqs)
                memset(&dasd_global_profile, 0,
                       sizeof(struct dasd_profile_info_t));
        dasd_global_profile.dasd_io_reqs++;
        dasd_global_profile.dasd_io_sects += sectors;

        if (!block->profile.dasd_io_reqs)
                memset(&block->profile, 0,
                       sizeof(struct dasd_profile_info_t));
        block->profile.dasd_io_reqs++;
        block->profile.dasd_io_sects += sectors;

        dasd_profile_counter(sectors, dasd_io_secs, block);
        dasd_profile_counter(tottime, dasd_io_times, block);
        dasd_profile_counter(tottimeps, dasd_io_timps, block);
        dasd_profile_counter(strtime, dasd_io_time1, block);
        dasd_profile_counter(irqtime, dasd_io_time2, block);
        dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
        dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif /* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
                                          int datasize,
                                          struct dasd_device *device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        /* Sanity checks */
        BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
               (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
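        /*
         * Note: (x + 7L) & -8L above is the usual round-up-to-a-
         * multiple-of-8 idiom. It keeps the struct dasd_ccw_req header,
         * the channel program and the data area 8-byte aligned within
         * the single chunk taken from the per-device ccw memory pool.
         */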
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        kfree(cqr->cpaddr);
        kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
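
/*
 * Note: the allocators and free routines come in matched pairs.
 * Requests from dasd_kmalloc_request() must be released with
 * dasd_kfree_request(); requests from dasd_smalloc_request() must be
 * released with dasd_sfree_request(), which returns the chunk to the
 * per-device ccw memory pool instead of calling kfree().
 */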
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->startdev;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DBF_DEV_EVENT(DBF_WARNING, device,
                              " dasd_ccw_req 0x%08x magic doesn't match"
                              " discipline 0x%08x",
                              cqr->magic,
                              *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int retries, rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->startdev;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->retries--;
                        cqr->status = DASD_CQR_CLEAR_PENDING;
                        cqr->stopclk = get_clock();
                        cqr->starttime = 0;
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        /* internal error 10 - unknown rc */
                        snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
                        dev_err(&device->cdev->dev, "An error occurred in the "
                                "DASD device driver, reason=%s\n", errorstring);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_device_bh(device);
        return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;
        char errorstring[ERRORLENGTH];

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc) {
                cqr->intrc = rc;
                return rc;
        }
        device = (struct dasd_device *) cqr->startdev;
        if (cqr->retries < 0) {
                /* internal error 14 - start_IO run out of retries */
                sprintf(errorstring, "14 %p", cqr);
                dev_err(&device->cdev->dev, "An error occurred in the DASD "
                        "device driver, reason=%s\n", errorstring);
                cqr->status = DASD_CQR_ERROR;
                return -EIO;
        }
        cqr->startclk = get_clock();
        cqr->starttime = jiffies;
        cqr->retries--;
        if (cqr->cpmode == 1) {
                rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                         (long) cqr, cqr->lpm);
        } else {
                rc = ccw_device_start(device->cdev, cqr->cpaddr,
                                      (long) cqr, cqr->lpm, 0);
        }
        switch (rc) {
        case 0:
                cqr->status = DASD_CQR_IN_IO;
                DBF_DEV_EVENT(DBF_DEBUG, device,
                              "start_IO: request %p started successful",
                              cqr);
                break;
        case -EBUSY:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: device busy, retry later");
                break;
        case -ETIMEDOUT:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: request timeout, retry later");
                break;
        case -EACCES:
                /* -EACCES indicates that the request used only a
                 * subset of the available paths and all these
                 * paths are gone.
                 * Do a retry with all available paths.
                 */
                cqr->lpm = LPM_ANYPATH;
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: selected paths gone,"
                              " retry on all paths");
                break;
        case -ENODEV:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -ENODEV device gone, retry");
                break;
        case -EIO:
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EIO device gone, retry");
                break;
        case -EINVAL:
                /* most likely caused in power management context */
                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                              "start_IO: -EINVAL device currently "
                              "not accessible");
                break;
        default:
                /* internal error 11 - unknown rc */
                snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", errorstring);
                BUG();
                break;
        }
        cqr->intrc = rc;
        return rc;
}
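
/*
 * Note: cqr->cpmode == 1 above selects transport-mode I/O, i.e. the
 * channel program is a TCW/ITCW chain started via
 * ccw_device_tm_start() instead of a classic command-mode CCW chain
 * started via ccw_device_start().
 */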
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_device *device;

        device = (struct dasd_device *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        device->stopped &= ~DASD_STOPPED_PENDING;
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
        if (expires == 0)
                del_timer(&device->timer);
        else
                mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
        del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
                                       unsigned long intparm)
{
        struct dasd_ccw_req *cqr;
        struct dasd_device *device;

        if (!intparm)
                return;
        cqr = (struct dasd_ccw_req *) intparm;
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_EVENT(DBF_DEBUG,
                          "invalid status in handle_killed_request: "
                          "bus_id %s, status %02x",
                          dev_name(&cdev->dev), cqr->status);
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (device == NULL ||
            device != dasd_device_from_cdev_locked(cdev) ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
                              "bus_id %s", dev_name(&cdev->dev));
                return;
        }

        /* Schedule request to be retried. */
        cqr->status = DASD_CQR_QUEUED;

        dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
        dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
        /* First of all start sense subsystem status request. */
        dasd_eer_snss(device);

        device->stopped &= ~DASD_STOPPED_PENDING;
        dasd_schedule_device_bh(device);
        if (device->block)
                dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long long now;
        int expires;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        break;
                case -ETIMEDOUT:
                        DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
                                  __func__, dev_name(&cdev->dev));
                        break;
                default:
                        DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
                                  __func__, dev_name(&cdev->dev),
                                  PTR_ERR(irb));
                }
                dasd_handle_killed_request(cdev, intparm);
                return;
        }

        now = get_clock();

        /* check for unsolicited interrupts */
        cqr = (struct dasd_ccw_req *) intparm;
        if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
                     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
                     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
                if (cqr && cqr->status == DASD_CQR_IN_IO)
                        cqr->status = DASD_CQR_QUEUED;
                device = dasd_device_from_cdev_locked(cdev);
                if (!IS_ERR(device)) {
                        dasd_device_clear_timer(device);
                        device->discipline->handle_unsolicited_interrupt(device,
                                                                         irb);
                        dasd_put_device(device);
                }
                return;
        }

        device = (struct dasd_device *) cqr->startdev;
        if (!device ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
                              "bus_id %s", dev_name(&cdev->dev));
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR_PENDING &&
            scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_device_clear_timer(device);
                wake_up(&dasd_flush_wq);
                dasd_schedule_device_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
                              "status %02x", dev_name(&cdev->dev), cqr->status);
                return;
        }

        next = NULL;
        expires = 0;
        if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
            scsw_cstat(&irb->scsw) == 0) {
                /* request was completed successfully */
                cqr->status = DASD_CQR_SUCCESS;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io. */
                if (cqr->devlist.next != &device->ccw_queue) {
                        next = list_entry(cqr->devlist.next,
                                          struct dasd_ccw_req, devlist);
                }
        } else {  /* error */
                memcpy(&cqr->irb, irb, sizeof(struct irb));
                /* log sense for every failed I/O to s390 debugfeature */
                dasd_log_sense_dbf(cqr, irb);
                if (device->features & DASD_FEATURE_ERPLOG)
                        dasd_log_sense(cqr, irb);
                /*
                 * If we don't want complex ERP for this request, then just
                 * reset this and retry it in the fastpath
                 */
                if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
                    cqr->retries > 0) {
                        if (cqr->lpm == LPM_ANYPATH)
                                DBF_DEV_EVENT(DBF_DEBUG, device,
                                              "default ERP in fastpath "
                                              "(%i retries left)",
                                              cqr->retries);
                        cqr->lpm = LPM_ANYPATH;
                        cqr->status = DASD_CQR_QUEUED;
                        next = cqr;
                } else
                        cqr->status = DASD_CQR_ERROR;
        }
        if (next && (next->status == DASD_CQR_QUEUED) &&
            (!device->stopped)) {
                if (device->discipline->start_IO(next) == 0)
                        expires = next->expires;
        }
        if (expires != 0)
                dasd_device_set_timer(device, expires);
        else
                dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
}
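
/*
 * Note on the fast path above: on a clean completion the handler does
 * not wait for the bottom half; it starts the next queued request
 * directly from interrupt context ("fast_io") and re-arms the device
 * timer with that request's expiry, avoiding an idle gap until the
 * tasklet runs.
 */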
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
                                   struct dasd_ccw_req *ref_cqr)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /*
         * only requeue request that came from the dasd_block layer
         */
        if (!ref_cqr->block)
                return;

        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                if (cqr->status == DASD_CQR_QUEUED &&
                    ref_cqr->block == cqr->block) {
                        cqr->status = DASD_CQR_CLEARED;
                }
        }
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
                                            struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        /* Process request with final status. */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                /* Stop list processing at the first non-final request. */
                if (cqr->status == DASD_CQR_QUEUED ||
                    cqr->status == DASD_CQR_IN_IO ||
                    cqr->status == DASD_CQR_CLEAR_PENDING)
                        break;
                if (cqr->status == DASD_CQR_ERROR)
                        __dasd_device_recovery(device, cqr);
                /* Rechain finished requests to final queue */
                list_move_tail(&cqr->devlist, final_queue);
        }
}
/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
                                              struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
        char errorstring[ERRORLENGTH];

        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
                callback = cqr->callback;
                callback_data = cqr->callback_data;
                if (block)
                        spin_lock_bh(&block->queue_lock);
                switch (cqr->status) {
                case DASD_CQR_SUCCESS:
                        cqr->status = DASD_CQR_DONE;
                        break;
                case DASD_CQR_ERROR:
                        cqr->status = DASD_CQR_NEED_ERP;
                        break;
                case DASD_CQR_CLEARED:
                        cqr->status = DASD_CQR_TERMINATED;
                        break;
                default:
                        /* internal error 12 - wrong cqr status */
                        snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
                                 cqr, cqr->status);
                        dev_err(&device->cdev->dev,
                                "An error occurred in the DASD device driver, "
                                "reason=%s\n", errorstring);
                        BUG();
                }
                if (cqr->callback != NULL)
                        (callback)(cqr, callback_data);
                if (block)
                        spin_unlock_bh(&block->queue_lock);
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
            (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
                if (device->discipline->term_IO(cqr) != 0) {
                        /* Hmpf, try again in 5 sec */
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is) but cannot be "
                                "ended, retrying in 5 s\n",
                                cqr, (cqr->expires/HZ));
                        cqr->expires += 5*HZ;
                        dasd_device_set_timer(device, 5*HZ);
                } else {
                        dev_err(&device->cdev->dev,
                                "cqr %p timed out (%is), %i retries "
                                "remaining\n", cqr, (cqr->expires/HZ),
                                cqr->retries);
                }
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
        /* when device is stopped, return request to previous layer */
        if (device->stopped) {
                cqr->status = DASD_CQR_CLEARED;
                dasd_schedule_device_bh(device);
                return;
        }

        rc = device->discipline->start_IO(cqr);
        if (rc == 0)
                dasd_device_set_timer(device, cqr->expires);
        else if (rc == -EACCES) {
                dasd_schedule_device_bh(device);
        } else
                /* Hmpf, try again in 1/2 sec */
                dasd_device_set_timer(device, 50);
}
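
/*
 * Note: the retry delay of 50 above is in jiffies, so the "1/2 sec"
 * in the comment assumes HZ=100; with other HZ values the delay
 * differs accordingly.
 */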
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr, *n;
        int rc;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to flush_queue */
                switch (cqr->status) {
                case DASD_CQR_IN_IO:
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Flushing the DASD request queue "
                                        "failed for request %p\n", cqr);
                                /* stop flush processing */
                                goto finished;
                        }
                        break;
                case DASD_CQR_QUEUED:
                        cqr->stopclk = get_clock();
                        cqr->status = DASD_CQR_CLEARED;
                        break;
                default: /* no need to modify the others */
                        break;
                }
                list_move_tail(&cqr->devlist, &flush_queue);
        }
finished:
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /*
         * After this point all requests must be in state CLEAR_PENDING,
         * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
         * one of the others.
         */
        list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));
        /*
         * Now set each request back to TERMINATED, DONE or NEED_ERP
         * and call the callback function of flushed requests
         */
        __dasd_device_process_final_queue(device, &flush_queue);
        return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
        struct list_head final_queue;

        atomic_set(&device->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Check expire time of first request on the ccw queue. */
        __dasd_device_check_expire(device);
        /* find final requests on ccw queue */
        __dasd_device_process_ccw_queue(device, &final_queue);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of requests with final status */
        __dasd_device_process_final_queue(device, &final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_device_start_head(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        dasd_put_device(device);
}
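
/*
 * Note: the ccwdev lock is dropped before
 * __dasd_device_process_final_queue() runs the callbacks, presumably
 * because a callback may take block->queue_lock or submit new
 * requests, which could deadlock if the ccwdev lock were still held.
 */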
/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = ((cqr->status == DASD_CQR_DONE ||
               cqr->status == DASD_CQR_NEED_ERP ||
               cqr->status == DASD_CQR_TERMINATED) &&
              list_empty(&cqr->devlist));
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
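
/*
 * Hedged usage sketch (illustration only): synchronous I/O via
 * dasd_sleep_on(). dasd_generic_read_dev_chars() further down in this
 * file uses exactly this pattern; my_build_cqr() is a hypothetical
 * builder that returns a request in DASD_CQR_FILLED state.
 */
#if 0
	cqr = my_build_cqr(device);
	if (!IS_ERR(cqr)) {
		rc = dasd_sleep_on(cqr);	/* 0 on success, else intrc or -EIO */
		dasd_sfree_request(cqr, cqr->memdev);
	}
#endif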

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
		cqr->intrc = rc;
	}

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}

/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
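
/*
 * Hedged usage sketch (illustration only): dasd_sleep_on_immediatly()
 * is meant for requests that must not wait behind the running I/O,
 * e.g. an eckd steal-lock request as mentioned above. The caller
 * prepares the cqr as usual; termination and requeueing of the
 * interrupted request happen internally.
 */
#if 0
	rc = dasd_sleep_on_immediatly(cqr);	/* cqr jumps to the queue head */
#endif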

/*
 * Cancels a request that was started via the dasd_sleep_on* functions.
 * This is useful to time out requests. The request will be
 * terminated if it is currently in I/O.
 * Returns 1 if the request has been terminated,
 *	   0 if there was no need to terminate the request (not started yet),
 *	   or a negative error code if termination failed.
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
			rc = 1;
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
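
/*
 * Hedged usage sketch (illustration only): a timeout built on top of
 * dasd_cancel_req(). Since cancellation is asynchronous, the caller
 * still waits for the final callback; _wait_for_wakeup() above is the
 * in-file predicate for that. The 30 second timeout is illustrative.
 */
#if 0
	if (!wait_event_timeout(generic_waitq, _wait_for_wakeup(cqr), 30 * HZ)) {
		dasd_cancel_req(cqr);
		/* wait for the cancelled request to be returned */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
#endif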

/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	block->base->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Set up a timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}
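
/*
 * Hedged usage sketch (illustration only): expiry is in jiffies relative
 * to now, so a half-second delay is HZ / 2 (as used by
 * __dasd_process_request_queue() below), and 0 cancels a pending timer.
 */
#if 0
	dasd_block_set_timer(block, HZ / 2);	/* fire in about 500 ms */
	dasd_block_set_timer(block, 0);		/* delete the timer again */
#endif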

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
					    struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;
	struct dasd_device *device = block->base;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;
		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}
		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}
		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}
		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;
		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;
		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;
		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device, cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_block_process_erp(block, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedules a call to dasd_block_tasklet over the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}

/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Configure limits and features of the request queue.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_sectors(block->request_queue, max);
	blk_queue_max_phys_segments(block->request_queue, -1L);
	blk_queue_max_hw_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base = block->base;
	int rc;

	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_block *block;
	struct dasd_device *base;

	block = bdev->bd_disk->private_data;
	if (!block)
		return -ENODEV;
	/* only dereference block->base after the NULL check above */
	base = block->base;
	if (!base->discipline ||
	    !base->discipline->fill_geometry)
		return -EINVAL;
	base->discipline->fill_geometry(block, geo);
	geo->start = get_start_sect(bdev) >> block->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
	else {
		struct dasd_device *device = dasd_device_from_cdev(cdev);

		wait_event(dasd_init_waitq, _wait_for_device(device));
		dasd_put_device(device);
	}
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_generic_probe: could not set ccw-device options "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_generic_probe: could not add sysfs entries "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	dasd_put_device(device);
	return rc;
}

int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		device->stopped |= DASD_STOPPED_DC_WAIT;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~DASD_STOPPED_DC_WAIT;
		if (device->stopped & DASD_UNRESUMED_PM) {
			device->stopped &= ~DASD_UNRESUMED_PM;
			dasd_restore_device(device);
			ret = 1;
			break;
		}
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* disallow new I/O */
	device->stopped |= DASD_STOPPED_PM;
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to freeze_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* move freeze_queue to start of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);

	if (device->discipline->restore)
		rc = device->discipline->restore(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
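
/*
 * Hedged usage sketch (illustration only): how a discipline might read
 * its device characteristics, e.g. from its check_device() callback.
 * The magic string and struct my_rdc_data are hypothetical and
 * discipline-specific.
 */
#if 0
	struct my_rdc_data rdc_data;	/* hypothetical layout */

	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data,
					 sizeof(rdc_data));
	if (rc)
		dev_warn(&device->cdev->dev,
			 "Reading device characteristics failed, rc=%d\n", rc);
#endif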

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2:	/* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
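
/*
 * Hedged usage sketch (illustration only): an interrupt handler can use
 * dasd_get_sense() to access sense data uniformly for command mode and
 * transport mode. The tested bit is illustrative, not a reference to a
 * real sense byte layout.
 */
#if 0
	char *sense = dasd_get_sense(irb);

	if (sense && (sense[0] & 0x80))	/* hypothetical condition */
		dev_dbg(&device->cdev->dev,
			"sense byte 0: %02x\n", sense[0]);
#endif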

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);