/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device * device);
static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);
	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device, device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Make a new device known to the system.
 */
static inline int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		dasd_put_device(device);
		return rc;
	}

	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline)
		module_put(device->discipline->owner);
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}

/*
 * Request the irq line for the device.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
					    8 * sizeof (long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_EMERG);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc) {
		if (rc != -EAGAIN)
			device->state = DASD_STATE_UNFMT;
		return rc;
	}
	/* make disk known with correct capacity */
	dasd_setup_queue(device);
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	device->state = DASD_STATE_READY;
	rc = dasd_scan_partitions(device);
	if (rc)
		device->state = DASD_STATE_BASIC;
	return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}

/*
 * Back to basic.
 */
static inline void
dasd_state_unfmt_to_basic(struct dasd_device * device)
{
	device->state = DASD_STATE_BASIC;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	device->state = DASD_STATE_READY;
}

/*
 * Device startup state changes.
 */
static inline int
dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	if (device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_unfmt_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	return 0;
}
/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
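
/*
 * For reference, the state ladder implied by the transition functions
 * above (an editor's sketch, derived only from the code in this file):
 *
 *   NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *                       |
 *                       v  (do_analysis failed with != -EAGAIN)
 *                     UNFMT  (only legal move is back down to BASIC;
 *                             raising the target returns -EPERM)
 */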
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device;

	device = (struct dasd_device *) data;
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}
/*
 * Enable a device and wait until the startup sequence has completed.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}
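
/*
 * Editor's note on the loop above: the counter arrays are logarithmic
 * histograms. A value v >= 4 is counted in bucket floor(log2(v)) - 1,
 * capped at index 31; values below 4 land in bucket 0. For example,
 * v = 4096 stops the loop at index 11, because 4096 >> 13 == 0.
 */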
/*
 * Add profiling information for cqr before execution.
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
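
/*
 * Editor's sketch (not built, hypothetical magic and sizes): how the
 * two allocators pair with their free routines. A cqr from
 * dasd_kmalloc_request must go back through dasd_kfree_request, one
 * from dasd_smalloc_request through dasd_sfree_request.
 */
#if 0
static int example_alloc_free(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* One ccw plus 32 bytes of payload from the per-device chunks. */
	cqr = dasd_smalloc_request("XMPL", 1, 32, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	/* ... fill in cqr->cpaddr[0] and cqr->data here ... */
	dasd_sfree_request(cqr, device);
	return 0;
}
#endif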
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;

	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminated cqr %p successfully",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;

	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successfully",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    device != dasd_device_from_cdev(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO) {
			cqr->status = DASD_CQR_QUEUED;
		}
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
/*
 * Notify the block layer about a finalized request.
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}
/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}
		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	status = cqr->device->discipline->free_cp(cqr, req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}
/*
 * Fetch requests from the block device queue.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
	       nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static inline void
__dasd_check_expire(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
			if (device->discipline->term_IO(cqr) != 0)
				/* Hmpf, try again in 1/10 sec */
				dasd_set_timer(device, 10);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* Non-temporary stop condition will trigger fail fast */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!dasd_eer_enabled(device))) {
		cqr->status = DASD_CQR_FAILED;
		dasd_schedule_bh(device);
		return;
	}
	/* Don't try to start requests if device is stopped */
	if (device->stopped)
		return;

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_set_timer(device, 50);
}
/*
 * Remove requests from the ccw queue.
 */
static void
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
	struct list_head flush_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Flush all requests or only block device requests? */
		if (all == 0 && cqr->callback == dasd_end_request_cb)
			continue;
		if (cqr->status == DASD_CQR_IN_IO)
			device->discipline->term_IO(cqr);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED) {
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = get_clock();
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			continue;
		}
		/* Rechain request on device request queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, &flush_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of flushed requests */
	list_for_each_safe(l, n, &flush_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_FAILED) &&
	      list_empty(&cqr->list));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
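
/*
 * Editor's sketch (not built): the usual synchronous pattern around
 * dasd_sleep_on. Building the actual channel program is discipline
 * specific and only hinted at; "XMPL" is a made-up magic.
 */
#if 0
static int example_sync_io(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_smalloc_request("XMPL", 1, 0, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->device = device;
	cqr->expires = 10 * HZ;		/* honored by __dasd_check_expire */
	cqr->retries = 2;
	/* ... set up cqr->cpaddr[0] here ... */
	rc = dasd_sleep_on(cqr);	/* blocks until DONE or FAILED */
	dasd_sfree_request(cqr, device);
	return rc;
}
#endif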
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/* wait (non-interruptible) for final status
				 * because signal is still pending
				 */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was not started - just remove it from
			 * the queue and fail it */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptable' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
  1385. /*
  1386. * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
  1387. * for eckd devices) the currently running request has to be terminated
  1388. * and be put back to status queued, before the special request is added
  1389. * to the head of the queue. Then the special request is waited on normally.
  1390. */
  1391. static inline int
  1392. _dasd_term_running_cqr(struct dasd_device *device)
  1393. {
  1394. struct dasd_ccw_req *cqr;
  1395. int rc;
  1396. if (list_empty(&device->ccw_queue))
  1397. return 0;
  1398. cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
  1399. rc = device->discipline->term_IO(cqr);
  1400. if (rc == 0) {
  1401. /* termination successful */
  1402. cqr->status = DASD_CQR_QUEUED;
  1403. cqr->startclk = cqr->stopclk = 0;
  1404. cqr->starttime = 0;
  1405. }
  1406. return rc;
  1407. }
int
dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
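
/*
 * Illustrative sketch (not from the driver): a discipline-side helper in
 * the style of the eckd steal-lock case referred to above. The helper name
 * and the elided build step are hypothetical; the point is only that urgent
 * requests go through dasd_sleep_on_immediatly, which terminates and
 * requeues the running request before putting the new one at the head of
 * the queue. Compiled out via #if 0.
 */
#if 0
static int example_urgent_request(struct dasd_device *device,
				  struct dasd_ccw_req *cqr)
{
	/* ... build the special channel program into cqr here ... */
	cqr->status = DASD_CQR_FILLED;
	return dasd_sleep_on_immediatly(cqr);	/* 0 on success, -EIO on failure */
}
#endif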
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful for timing out requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* FIXME: unclear what to do if termination fails,
			   e.g. the request is no longer _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
	return rc;
}
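
/*
 * Illustrative sketch (not from the driver): using dasd_cancel_req to
 * implement a request timeout with a 2.6-era kernel timer. The
 * example_timeout_fn / example_arm_timeout names are hypothetical; only
 * dasd_cancel_req and the standard timer API are real. Compiled out
 * via #if 0.
 */
#if 0
#include <linux/timer.h>

static void example_timeout_fn(unsigned long data)
{
	struct dasd_ccw_req *cqr = (struct dasd_ccw_req *) data;

	/* returns 1 if a running request was terminated, 0 if it was
	 * merely dequeued or already final */
	dasd_cancel_req(cqr);
}

static void example_arm_timeout(struct dasd_ccw_req *cqr,
				struct timer_list *timer,
				unsigned long timeout)
{
	init_timer(timer);
	timer->function = example_timeout_fn;
	timer->data = (unsigned long) cqr;
	timer->expires = jiffies + timeout;
	add_timer(timer);
}
#endif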
/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(request_queue_t *queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device *device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Set up request queue parameters (sector size, maximum request size,
 * segment limits) from the device and discipline settings.
 */
static void
dasd_setup_queue(struct dasd_device *device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}
/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device *device)
{
	if (device->request_queue) {
		blk_cleanup_queue(device->request_queue);
		device->request_queue = NULL;
	}
}
/*
 * Flush requests on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device *device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	spin_lock_irq(&device->request_queue_lock);
	while (!list_empty(&device->request_queue->queue_head)) {
		req = elv_next_request(device->request_queue);
		if (req == NULL)
			break;
		dasd_end_request(req, 0);
		blkdev_dequeue_request(req);
	}
	spin_unlock_irq(&device->request_queue_lock);
}
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}

static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}
/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (!device)
		return -ENODEV;

	if (!device->discipline ||
	    !device->discipline->fill_geometry)
		return -EINVAL;

	device->discipline->fill_geometry(device, geo);
	geo->start = get_start_sect(bdev) >> device->s2b_shift;
	return 0;
}
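
/*
 * Illustrative sketch (not part of the driver): the userspace view of
 * dasd_getgeo via the standard HDIO_GETGEO ioctl. The device path is an
 * example only. Compiled out via #if 0 since this is userspace code.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/hdreg.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/dasda", O_RDONLY);	/* example device node */

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	return 0;
}
#endif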
struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int
dasd_generic_probe(struct ccw_device *cdev,
		   struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
		ret = ccw_device_set_online(cdev);
	if (ret)
		printk(KERN_WARNING
		       "dasd_generic_probe: could not initially online "
		       "ccw-device %s\n", cdev->dev.bus_id);
	return ret;
}
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void
dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when the
 * device is detected for the first time and is supposed to be used, or
 * when the user has started activation through sysfs.
 */
int
dasd_generic_set_online(struct ccw_device *cdev,
			struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
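
/*
 * Illustrative sketch (not from this file): how a discipline driver, in
 * the style of dasd_eckd, typically wires the dasd_generic_* helpers into
 * its struct ccw_driver. example_discipline, the wrapper names and the
 * elided .ids table are placeholders; the generic callbacks are the real
 * exports of this file. Compiled out via #if 0.
 */
#if 0
static int example_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &example_discipline);
}

static int example_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &example_discipline);
}

static struct ccw_driver example_dasd_driver = {
	.name        = "dasd-example",
	.owner       = THIS_MODULE,
	/* .ids table of supported cu/device types elided */
	.probe       = example_probe,
	.remove      = dasd_generic_remove,
	.set_online  = example_set_online,
	.set_offline = dasd_generic_set_offline,
	.notify      = dasd_generic_notify,
};
#endif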
int
dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, including
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	open_count = (int) atomic_read(&device->open_count);
	if (open_count > max_count) {
		if (open_count > 0)
			printk(KERN_WARNING "Can't offline dasd device with "
			       "open count = %i.\n",
			       open_count);
		else
			printk(KERN_WARNING "%s",
			       "Can't offline dasd device due to internal "
			       "use\n");
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_EMERG);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);