
/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/config.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/notifier.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device * device);
static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
static void dasd_disable_eer(struct dasd_device *device);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	memset(device, 0, sizeof (struct dasd_device));
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);
	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set (&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device, device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
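
/*
 * Illustrative caller pattern (editorial sketch, not part of the original
 * file): dasd_alloc_device returns an ERR_PTR on failure, so callers are
 * expected to test with IS_ERR() rather than comparing against NULL:
 *
 *	device = dasd_alloc_device();
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 */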
/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
/*
 * Make a new device known to the system.
 */
static inline int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		dasd_put_device(device);
		return rc;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* disable extended error reporting for this device */
	dasd_disable_eer(device);
	/* Forget the discipline information. */
	device->discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}

/*
 * Request the irq line for the device.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
					    8 * sizeof (long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_EMERG);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0 or an error)
 * the block device is set up. Either a fake disk is added to allow
 * formatting or a proper device request queue is created.
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc)
		return rc;
	dasd_setup_queue(device);
	device->state = DASD_STATE_READY;
	if (dasd_scan_partitions(device) != 0)
		device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	device->state = DASD_STATE_READY;
}

/*
 * Device startup state changes.
 */
static inline int
dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	return 0;
}
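
/*
 * Editorial summary of the state machine above (not part of the original
 * source). dasd_increase_state() walks up and dasd_decrease_state() walks
 * down this chain, one transition function per step:
 *
 *	DASD_STATE_NEW   <-> DASD_STATE_KNOWN   (alloc/free request queue)
 *	DASD_STATE_KNOWN <-> DASD_STATE_BASIC   (gendisk and debug area)
 *	DASD_STATE_BASIC <-> DASD_STATE_READY   (analysis, queue setup)
 *	DASD_STATE_READY <-> DASD_STATE_ONLINE  (normal request processing)
 */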
/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device;

	device = (struct dasd_device *) data;
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Wait predicate: true once the device has settled at its target state.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}
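
/*
 * Worked example (editorial note, not from the original source): the for
 * loop above selects a logarithmic histogram bucket, roughly
 * index = log2(value) - 1, capped at 31. Values 0..3 land in bucket 0,
 * 4..7 in bucket 1, and for value = 4096 (2^12) the shift first yields 0
 * at index 11, so bucket 11 is incremented.
 */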
/*
 * Add profiling information for cqr before execution.
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
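
/*
 * Usage sketch (an illustrative assumption, not taken from this file):
 * a discipline typically pairs each allocation with the matching free
 * routine and runs the request synchronously via dasd_sleep_on():
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	// ... fill in cqr->cpaddr (channel program) and cqr->data ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 */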
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * The timer keeps the device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successfully",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}
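
/*
 * Editorial note: expires is measured in jiffies, so the wall-clock delay
 * depends on the kernel's HZ setting (an assumption; with HZ=100 the
 * dasd_set_timer(device, 50) retry used later in this file is 0.5s and
 * dasd_set_timer(device, 10) is 0.1s).
 */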
/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}
static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    device != dasd_device_from_cdev(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	/* first of all call extended error reporting */
	dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO) {
			cqr->status = DASD_CQR_QUEUED;
		}
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
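
/*
 * Editorial summary of the era decision above (not part of the original
 * source): halted I/O is always fatal; a clean channel end/device end with
 * no concurrent sense is success (dasd_era_none); requests without
 * DASD_CQR_FLAGS_USE_ERP are never recovered; otherwise either the
 * discipline's examine_error() (when sense data is present) or a plain
 * dasd_era_recover decides how error recovery proceeds.
 */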
/*
 * posts the buffer_cache about a finalized request
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}
		/* first of all call extended error reporting */
		if (device->eer && cqr->status == DASD_CQR_FAILED) {
			dasd_write_eer_trigger(DASD_EER_FATALERROR,
					       device, cqr);
			/* restart request */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}
		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	status = cqr->device->discipline->free_cp(cqr, req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}

/*
 * Fetch requests from the block device queue.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
	       nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static inline void
__dasd_check_expire(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
			if (device->discipline->term_IO(cqr) != 0)
				/* Hmpf, try again in 1/10 sec */
				dasd_set_timer(device, 10);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	/* check FAILFAST */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!device->eer)) {
		cqr->status = DASD_CQR_FAILED;
		dasd_schedule_bh(device);
	}
	if ((cqr->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		/* try to start the first I/O that can be started */
		rc = device->discipline->start_IO(cqr);
		if (rc == 0)
			dasd_set_timer(device, cqr->expires);
		else if (rc == -EACCES) {
			dasd_schedule_bh(device);
		} else
			/* Hmpf, try again in 1/2 sec */
			dasd_set_timer(device, 50);
	}
}
/*
 * Remove requests from the ccw queue.
 */
static void
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
	struct list_head flush_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Flush all requests or only block device requests? */
		if (all == 0 && cqr->callback == dasd_end_request_cb)
			continue;
		if (cqr->status == DASD_CQR_IN_IO)
			device->discipline->term_IO(cqr);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED) {
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = get_clock();
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			continue;
		}
		/* Rechain request on device request queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, &flush_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of flushed requests */
	list_for_each_safe(l, n, &flush_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_FAILED) &&
	      list_empty(&cqr->list));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
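
/*
 * Editorial note (not from the original source): completion is signalled
 * by dasd_wakeup_cb(), which the tasklet invokes once the request leaves
 * the ccw queue with a final status; _wait_for_wakeup() re-checks that
 * status under the ccw device lock, so the wait_event() above is safe
 * against spurious wakeups. Because it sleeps, dasd_sleep_on() must not
 * be called from interrupt context.
 */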
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/*
				 * wait (non-interruptible) for final status
				 * because the signal is still pending
				 */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was not started - remove it from the
			 * queue and fail it */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptible' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
  1366. /*
  1367. * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
  1368. * for eckd devices) the currently running request has to be terminated
  1369. * and be put back to status queued, before the special request is added
  1370. * to the head of the queue. Then the special request is waited on normally.
  1371. */
  1372. static inline int
  1373. _dasd_term_running_cqr(struct dasd_device *device)
  1374. {
  1375. struct dasd_ccw_req *cqr;
  1376. int rc;
  1377. if (list_empty(&device->ccw_queue))
  1378. return 0;
  1379. cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
  1380. rc = device->discipline->term_IO(cqr);
  1381. if (rc == 0) {
  1382. /* termination successful */
  1383. cqr->status = DASD_CQR_QUEUED;
  1384. cqr->startclk = cqr->stopclk = 0;
  1385. cqr->starttime = 0;
  1386. }
  1387. return rc;
  1388. }
int
dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	wait_event(wait_q, _wait_for_wakeup(cqr));
	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
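
/*
 * Illustrative sketch only: a discipline function that must bypass the
 * queue (e.g. an ECKD steal lock request) builds its cqr as usual and
 * then simply calls
 *
 *	rc = dasd_sleep_on_immediatly(cqr);
 *
 * A request that was running is terminated and requeued by
 * _dasd_term_running_cqr() above, so ordering on the device is preserved.
 */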
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful for timing out requests. The request will be
 * terminated if it is currently in I/O.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* FIXME: what should be done if the request cannot
			   be terminated, e.g. because it is not _IN_IO? */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
	return rc;
}
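
/*
 * Illustrative sketch only: combined with a bounded wait, dasd_cancel_req()
 * implements a request timeout. Assuming the caller started cqr with the
 * wakeup callback as in dasd_sleep_on_interruptible(), and an arbitrary
 * ten second limit:
 *
 *	if (!wait_event_timeout(wait_q, _wait_for_wakeup(cqr), 10 * HZ)) {
 *		dasd_cancel_req(cqr);
 *		wait_event(wait_q, _wait_for_wakeup(cqr));
 *	}
 *
 * The second wait is needed because termination of a request that is in
 * I/O completes asynchronously through the interrupt handler.
 */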
/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(request_queue_t *queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device *device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Set up the request queue parameters (block size and transfer limits).
 */
static void
dasd_setup_queue(struct dasd_device *device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}
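
/*
 * Worked example (the max_blocks value is only an assumption for
 * illustration): with 4096 byte blocks, bp_block is 4096 and s2b_shift
 * is 3 (eight 512 byte sectors per block), so a discipline max_blocks
 * of 240 yields max = 240 << 3 = 1920 sectors per request.
 */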
/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device *device)
{
	if (device->request_queue) {
		blk_cleanup_queue(device->request_queue);
		device->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device *device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	spin_lock_irq(&device->request_queue_lock);
	while (!list_empty(&device->request_queue->queue_head)) {
		req = elv_next_request(device->request_queue);
		if (req == NULL)
			break;
		dasd_end_request(req, 0);
		blkdev_dequeue_request(req);
	}
	spin_unlock_irq(&device->request_queue_lock);
}
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state < DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}

static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}
/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (!device)
		return -ENODEV;

	if (!device->discipline ||
	    !device->discipline->fill_geometry)
		return -EINVAL;

	device->discipline->fill_geometry(device, geo);
	geo->start = get_start_sect(bdev) >> device->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_ioctl_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	devfs_remove("dasd");
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int
dasd_generic_probe(struct ccw_device *cdev,
		   struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
	} else {
		cdev->handler = &dasd_int_handler;
	}
	return ret;
}
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void
dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int
dasd_generic_set_online(struct ccw_device *cdev,
			struct dasd_discipline *discipline)
{
	struct dasd_device *device;
	int rc;

	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
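
/*
 * Illustrative sketch only: a discipline's ccw_driver methods are thin
 * wrappers around the generic helpers, roughly like the following, where
 * "xxx" stands for the discipline name and dasd_xxx_discipline for its
 * dasd_discipline structure:
 *
 *	static int
 *	dasd_xxx_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &dasd_xxx_discipline);
 *	}
 *
 *	static int
 *	dasd_xxx_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_xxx_discipline);
 *	}
 */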
int
dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk(KERN_WARNING "Can't offline dasd device with open"
		       " count = %i.\n",
		       atomic_read(&device->open_count));
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* first of all call extended error reporting */
		dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
/*
 * Automatically online either all dasd devices (dasd_autodetect) or
 * all devices specified with dasd= parameters.
 */
static int
__dasd_auto_online(struct device *dev, void *data)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
		ccw_device_set_online(cdev);
	return 0;
}

void
dasd_generic_auto_online(struct ccw_driver *dasd_discipline_driver)
{
	struct device_driver *drv;

	drv = get_driver(&dasd_discipline_driver->driver);
	driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
	put_driver(drv);
}
/*
 * notifications for extended error reports
 */
static struct notifier_block *dasd_eer_chain;

int
dasd_register_eer_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&dasd_eer_chain, nb);
}

int
dasd_unregister_eer_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&dasd_eer_chain, nb);
}

/*
 * Notify the registered error reporting module of a problem
 */
void
dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
		       struct dasd_ccw_req *cqr)
{
	if (device->eer) {
		struct dasd_eer_trigger temp;

		temp.id = id;
		temp.device = device;
		temp.cqr = cqr;
		notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
				    (void *) &temp);
	}
}
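
/*
 * Illustrative sketch only: an error reporting module would hook into the
 * chain roughly as follows; the handler and notifier_block names are made
 * up for the example.
 *
 *	static int
 *	my_eer_notify(struct notifier_block *nb, unsigned long action,
 *		      void *data)
 *	{
 *		struct dasd_eer_trigger *trigger = data;
 *
 *		if (action == DASD_EER_TRIGGER)
 *			... handle trigger->id / trigger->device ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_eer_nb = {
 *		.notifier_call = my_eer_notify,
 *	};
 *
 *	dasd_register_eer_notifier(&my_eer_nb);
 */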
/*
 * Tell the registered error reporting module to disable error reporting for
 * a given device and to clean up any private data structures on that device.
 */
static void
dasd_disable_eer(struct dasd_device *device)
{
	notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *) device);
}
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_EMERG);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = devfs_mk_dir("dasd");
	if (rc)
		goto failed;
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_ioctl_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);

EXPORT_SYMBOL(dasd_register_eer_notifier);
EXPORT_SYMBOL(dasd_unregister_eer_notifier);
EXPORT_SYMBOL(dasd_write_eer_trigger);
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-indent-level: 4
 * c-brace-imaginary-offset: 0
 * c-brace-offset: -4
 * c-argdecl-indent: 4
 * c-label-offset: -4
 * c-continued-statement-offset: 4
 * c-continued-brace-offset: 0
 * indent-tabs-mode: 1
 * tab-width: 8
 * End:
 */