/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 * $Revision: 1.167 $
 */

#include <linux/config.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device * device);
static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
        if (device == NULL)
                return ERR_PTR(-ENOMEM);
        memset(device, 0, sizeof (struct dasd_device));
        /* open_count = 0 means device online but not in use */
        atomic_set(&device->open_count, -1);
        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (device->ccw_mem == NULL) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (device->erp_mem == NULL) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        spin_lock_init(&device->request_queue_lock);
        atomic_set (&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        INIT_WORK(&device->kick_work, do_kick_device, device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;

        return device;
}
/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
        if (device->private)
                kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
/*
 * Make a new device known to the system.
 */
static inline int
dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        rc = dasd_alloc_queue(device);
        if (rc) {
                dasd_put_device(device);
                return rc;
        }

        device->state = DASD_STATE_KNOWN;
        return 0;
}

/*
 * Let the system forget about a device.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
        /* Forget the discipline information. */
        device->discipline = NULL;
        device->state = DASD_STATE_NEW;

        dasd_free_queue(device);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
}
/*
 * Request the irq line for the device.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
        int rc;

        /* Allocate and register gendisk structure. */
        rc = dasd_gendisk_alloc(device);
        if (rc)
                return rc;

        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
                                            8 * sizeof (long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_EMERG);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
        dasd_gendisk_free(device);
        dasd_flush_ccw_queue(device, 1);
        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }

        device->state = DASD_STATE_KNOWN;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0 or error)
 * the block device is set up. Either a fake disk is added to allow
 * formatting or a proper device request queue is created.
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
        int rc;

        rc = 0;
        if (device->discipline->do_analysis != NULL)
                rc = device->discipline->do_analysis(device);
        if (rc)
                return rc;
        dasd_setup_queue(device);
        device->state = DASD_STATE_READY;
        if (dasd_scan_partitions(device) != 0)
                device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
        dasd_flush_ccw_queue(device, 0);
        dasd_destroy_partitions(device);
        dasd_flush_request_queue(device);
        device->blocks = 0;
        device->bp_block = 0;
        device->s2b_shift = 0;
        device->state = DASD_STATE_BASIC;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
        device->state = DASD_STATE_ONLINE;
        dasd_schedule_bh(device);
        return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
        device->state = DASD_STATE_READY;
}
/*
 * Device startup state changes.
 */
static inline int
dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}

/*
 * Device shutdown state changes.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                dasd_state_online_to_ready(device);

        if (device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                dasd_state_ready_to_basic(device);

        if (device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                dasd_state_basic_to_known(device);

        if (device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                dasd_state_known_to_new(device);

        return 0;
}
/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc && rc != -EAGAIN)
                device->target = device->state;

        if (device->state == device->target)
                wake_up(&dasd_init_waitq);
}
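
/*
 * Summary of the state ladder traversed above:
 *
 *      NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *
 * dasd_increase_state moves right toward device->target one rung at a
 * time, dasd_decrease_state moves left. If a transition fails with
 * anything but -EAGAIN, the target is reset to the current state.
 */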
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
        struct dasd_device *device;

        device = (struct dasd_device *) data;
        dasd_change_state(device);
        dasd_schedule_bh(device);
        dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to do_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}
/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
}
/*
 * Enable a device and wait until the state change is finished.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
        int index; \
        for (index = 0; index < 31 && value >> (2+index); index++); \
        dasd_global_profile.counter[index]++; \
        device->profile.counter[index]++; \
}
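
/*
 * The loop above computes a logarithmic histogram bucket: index is the
 * smallest i with value >> (2+i) == 0, capped at 31. For example,
 * value 8 lands in bucket 2 (8 >> 4 == 0) and value 100 in bucket 5
 * (100 >> 7 == 0), so each bucket covers one power of two starting at 4.
 */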
/*
 * Add profiling information for cqr before execution.
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
                   struct request *req)
{
        struct list_head *l;
        unsigned int counter;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        /* count the length of the chanq for statistics */
        counter = 0;
        list_for_each(l, &device->ccw_queue)
                if (++counter >= 31)
                        break;
        dasd_global_profile.dasd_io_nr_req[counter]++;
        device->profile.dasd_io_nr_req[counter]++;
}
/*
 * Add profiling information for cqr after execution.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
                 struct request *req)
{
        long strtime, irqtime, endtime, tottime;        /* in microseconds */
        long tottimeps, sectors;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        sectors = req->nr_sectors;
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        if (!dasd_global_profile.dasd_io_reqs)
                memset(&dasd_global_profile, 0,
                       sizeof (struct dasd_profile_info_t));
        dasd_global_profile.dasd_io_reqs++;
        dasd_global_profile.dasd_io_sects += sectors;

        if (!device->profile.dasd_io_reqs)
                memset(&device->profile, 0,
                       sizeof (struct dasd_profile_info_t));
        device->profile.dasd_io_reqs++;
        device->profile.dasd_io_sects += sectors;

        dasd_profile_counter(sectors, dasd_io_secs, device);
        dasd_profile_counter(tottime, dasd_io_times, device);
        dasd_profile_counter(tottimeps, dasd_io_timps, device);
        dasd_profile_counter(strtime, dasd_io_time1, device);
        dasd_profile_counter(irqtime, dasd_io_time2, device);
        dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
        dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else

#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)

#endif                          /* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
                     struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        if ( magic == NULL || datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
                BUG();

        cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        if (cqr->cpaddr != NULL)
                                kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
                memset(cqr->data, 0, datasize);
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
                     struct dasd_device * device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        /* Sanity checks */
        if ( magic == NULL || datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
                BUG();

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
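
/*
 * A minimal allocation/free pairing sketch, for illustration only (not
 * part of the original driver). The magic string "TEST", the retry and
 * expiry values, and the single-CCW program size are assumptions; real
 * callers use their discipline's magic and fill in the channel program
 * before submitting the request.
 */
#if 0
static struct dasd_ccw_req *
dasd_example_alloc(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        /* one CCW plus 32 bytes of payload from the per-device chunks */
        cqr = dasd_smalloc_request("TEST", 1, 32, device);
        if (IS_ERR(cqr))
                return cqr;
        cqr->device = device;
        cqr->retries = 2;
        cqr->expires = 10 * HZ;
        /* ... set up cqr->cpaddr[0] and cqr->data here ... */
        return cqr;             /* release with dasd_sfree_request() */
}
#endif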
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_ARCH_S390X
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        if (cqr->cpaddr != NULL)
                kfree(cqr->cpaddr);
        if (cqr->data != NULL)
                kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}
void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->device;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DEV_MESSAGE(KERN_WARNING, device,
                            " dasd_ccw_req 0x%08x magic doesn't match"
                            " discipline 0x%08x",
                            cqr->magic,
                            *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;
        int retries, rc;

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;

        retries = 0;
        device = (struct dasd_device *) cqr->device;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        if (cqr->retries > 0) {
                                cqr->retries--;
                                cqr->status = DASD_CQR_CLEAR;
                        } else
                                cqr->status = DASD_CQR_FAILED;
                        cqr->stopclk = get_clock();
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        DEV_MESSAGE(KERN_ERR, device,
                                    "line %d unknown RC=%d, please "
                                    "report to linux390@de.ibm.com",
                                    __LINE__, rc);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_bh(device);
        return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;
        int rc;

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;

        device = (struct dasd_device *) cqr->device;
        if (cqr->retries < 0) {
                DEV_MESSAGE(KERN_DEBUG, device,
                            "start_IO: request %p (%02x/%i) - no retry left.",
                            cqr, cqr->status, cqr->retries);
                cqr->status = DASD_CQR_FAILED;
                return -EIO;
        }
        cqr->startclk = get_clock();
        cqr->starttime = jiffies;
        cqr->retries--;
        rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
                              cqr->lpm, 0);
        switch (rc) {
        case 0:
                cqr->status = DASD_CQR_IN_IO;
                DBF_DEV_EVENT(DBF_DEBUG, device,
                              "start_IO: request %p started successful",
                              cqr);
                break;
        case -EBUSY:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: device busy, retry later");
                break;
        case -ETIMEDOUT:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: request timeout, retry later");
                break;
        case -EACCES:
                /* -EACCES indicates that the request used only a
                 * subset of the available paths and all these
                 * paths are gone.
                 * Do a retry with all available paths.
                 */
                cqr->lpm = LPM_ANYPATH;
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: selected paths gone,"
                              " retry on all paths");
                break;
        case -ENODEV:
        case -EIO:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: device gone, retry");
                break;
        default:
                DEV_MESSAGE(KERN_ERR, device,
                            "line %d unknown RC=%d, please report"
                            " to linux390@de.ibm.com", __LINE__, rc);
                BUG();
                break;
        }
        return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_device *device;

        device = (struct dasd_device *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        device->stopped &= ~DASD_STOPPED_PENDING;
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_bh(device);
}
/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
        if (expires == 0) {
                if (timer_pending(&device->timer))
                        del_timer(&device->timer);
                return;
        }
        if (timer_pending(&device->timer)) {
                if (mod_timer(&device->timer, jiffies + expires))
                        return;
        }
        device->timer.function = dasd_timeout_device;
        device->timer.data = (unsigned long) device;
        device->timer.expires = jiffies + expires;
        add_timer(&device->timer);
}
/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
        if (timer_pending(&device->timer))
                del_timer(&device->timer);
}
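
/*
 * Usage note: expires is in jiffies. The callers below retry "in
 * 1/10 sec" with dasd_set_timer(device, 10) and "in 1/2 sec" with
 * dasd_set_timer(device, 50), which matches a HZ=100 tick; calling
 * dasd_set_timer(device, 0) has the same effect as dasd_clear_timer().
 */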
static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
        struct dasd_ccw_req *cqr;
        struct dasd_device *device;

        cqr = (struct dasd_ccw_req *) intparm;
        if (cqr->status != DASD_CQR_IN_IO) {
                MESSAGE(KERN_DEBUG,
                        "invalid status in handle_killed_request: "
                        "bus_id %s, status %02x",
                        cdev->dev.bus_id, cqr->status);
                return;
        }

        device = (struct dasd_device *) cqr->device;
        if (device == NULL ||
            device != dasd_device_from_cdev(cdev) ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        /* Schedule request to be retried. */
        cqr->status = DASD_CQR_QUEUED;

        dasd_clear_timer(device);
        dasd_schedule_bh(device);
        dasd_put_device(device);
}
static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        struct list_head *l, *n;

        device->stopped &= ~DASD_STOPPED_PENDING;

        /* restart all 'running' IO on queue */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                if (cqr->status == DASD_CQR_IN_IO) {
                        cqr->status = DASD_CQR_QUEUED;
                }
        }
        dasd_clear_timer(device);
        dasd_schedule_bh(device);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                 struct irb *irb)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long long now;
        int expires;
        dasd_era_t era;
        char mask;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        dasd_handle_killed_request(cdev, intparm);
                        break;
                case -ETIMEDOUT:
                        printk(KERN_WARNING"%s(%s): request timed out\n",
                               __FUNCTION__, cdev->dev.bus_id);
                        //FIXME - dasd uses own timeout interface...
                        break;
                default:
                        printk(KERN_WARNING"%s(%s): unknown error %ld\n",
                               __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
                }
                return;
        }

        now = get_clock();

        DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
                  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
                  (unsigned int) intparm);

        /* first of all check for state change pending interrupt */
        mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
        if ((irb->scsw.dstat & mask) == mask) {
                device = dasd_device_from_cdev(cdev);
                if (!IS_ERR(device)) {
                        dasd_handle_state_change_pending(device);
                        dasd_put_device(device);
                }
                return;
        }

        cqr = (struct dasd_ccw_req *) intparm;

        /* check for unsolicited interrupts */
        if (cqr == NULL) {
                MESSAGE(KERN_DEBUG,
                        "unsolicited interrupt received: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        device = (struct dasd_device *) cqr->device;
        if (device == NULL ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR &&
            irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_QUEUED;
                dasd_clear_timer(device);
                dasd_schedule_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                MESSAGE(KERN_DEBUG,
                        "invalid status: bus_id %s, status %02x",
                        cdev->dev.bus_id, cqr->status);
                return;
        }
        DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
                      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

        /* Find out the appropriate era_action. */
        if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
                era = dasd_era_fatal;
        else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
                 irb->scsw.cstat == 0 &&
                 !irb->esw.esw0.erw.cons)
                era = dasd_era_none;
        else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
                era = dasd_era_fatal; /* don't recover this request */
        else if (irb->esw.esw0.erw.cons)
                era = device->discipline->examine_error(cqr, irb);
        else
                era = dasd_era_recover;

        DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
        expires = 0;
        if (era == dasd_era_none) {
                cqr->status = DASD_CQR_DONE;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io. */
                if (cqr->list.next != &device->ccw_queue) {
                        next = list_entry(cqr->list.next,
                                          struct dasd_ccw_req, list);
                        if ((next->status == DASD_CQR_QUEUED) &&
                            (!device->stopped)) {
                                if (device->discipline->start_IO(next) == 0)
                                        expires = next->expires;
                                else
                                        DEV_MESSAGE(KERN_DEBUG, device, "%s",
                                                    "Interrupt fastpath "
                                                    "failed!");
                        }
                }
        } else {                /* error */
                memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
                /* dump sense data */
                dasd_log_sense(cqr, irb);
#endif
                switch (era) {
                case dasd_era_fatal:
                        cqr->status = DASD_CQR_FAILED;
                        cqr->stopclk = now;
                        break;
                case dasd_era_recover:
                        cqr->status = DASD_CQR_ERROR;
                        break;
                default:
                        BUG();
                }
        }
        if (expires != 0)
                dasd_set_timer(device, expires);
        else
                dasd_clear_timer(device);
        dasd_schedule_bh(device);
}
/*
 * Notify the block layer about a finalized request.
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
        if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
                BUG();
        add_disk_randomness(req->rq_disk);
        end_that_request_last(req);
}
/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}
/*
 * Process ccw request queue.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
                         struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;

restart:
        /* Process requests with final status. */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                /* Stop list processing at the first non-final request. */
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_ERROR)
                        break;
                /* Process requests with DASD_CQR_ERROR */
                if (cqr->status == DASD_CQR_ERROR) {
                        if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
                                cqr->status = DASD_CQR_FAILED;
                                cqr->stopclk = get_clock();
                        } else {
                                if (cqr->irb.esw.esw0.erw.cons) {
                                        erp_fn = device->discipline->
                                                erp_action(cqr);
                                        erp_fn(cqr);
                                } else
                                        dasd_default_erp_action(cqr);
                        }
                        goto restart;
                }
                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        goto restart;
                }
                /* Rechain finished requests to final queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->list, final_queue);
        }
}
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
        struct request *req;
        struct dasd_device *device;
        int status;

        req = (struct request *) data;
        device = cqr->device;
        dasd_profile_end(device, cqr, req);
        status = cqr->device->discipline->free_cp(cqr, req);
        spin_lock_irq(&device->request_queue_lock);
        dasd_end_request(req, status);
        spin_unlock_irq(&device->request_queue_lock);
}
/*
 * Fetch requests from the block device queue.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
        request_queue_t *queue;
        struct request *req;
        struct dasd_ccw_req *cqr;
        int nr_queued;

        queue = device->request_queue;
        /* No queue ? Then there is nothing to do. */
        if (queue == NULL)
                return;

        /*
         * We requeue request from the block device queue to the ccw
         * queue only in two states. In state DASD_STATE_READY the
         * partition detection is done and we need to requeue requests
         * for that. State DASD_STATE_ONLINE is normal block device
         * operation.
         */
        if (device->state != DASD_STATE_READY &&
            device->state != DASD_STATE_ONLINE)
                return;
        nr_queued = 0;
        /* Now we try to fetch requests from the request queue */
        list_for_each_entry(cqr, &device->ccw_queue, list)
                if (cqr->status == DASD_CQR_QUEUED)
                        nr_queued++;
        while (!blk_queue_plugged(queue) &&
               elv_next_request(queue) &&
               nr_queued < DASD_CHANQ_MAX_SIZE) {
                req = elv_next_request(queue);
                if (device->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, device,
                                      "Rejecting write request %p",
                                      req);
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                if (device->stopped & DASD_STOPPED_DC_EIO) {
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                cqr = device->discipline->build_cp(device, req);
                if (IS_ERR(cqr)) {
                        if (PTR_ERR(cqr) == -ENOMEM)
                                break;  /* terminate request queue loop */
                        DBF_DEV_EVENT(DBF_ERR, device,
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                cqr->callback = dasd_end_request_cb;
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_QUEUED;
                blkdev_dequeue_request(req);
                list_add_tail(&cqr->list, &device->ccw_queue);
                dasd_profile_start(device, cqr, req);
                nr_queued++;
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static inline void
__dasd_check_expire(struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
                if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
                        if (device->discipline->term_IO(cqr) != 0)
                                /* Hmpf, try again in 1/10 sec */
                                dasd_set_timer(device, 10);
                }
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        if ((cqr->status == DASD_CQR_QUEUED) &&
            (!device->stopped)) {
                /* try to start the first I/O that can be started */
                rc = device->discipline->start_IO(cqr);
                if (rc == 0)
                        dasd_set_timer(device, cqr->expires);
                else if (rc == -EACCES) {
                        dasd_schedule_bh(device);
                } else
                        /* Hmpf, try again in 1/2 sec */
                        dasd_set_timer(device, 50);
        }
}
/*
 * Remove requests from the ccw queue.
 */
static void
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
        struct list_head flush_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                /* Flush all requests or only block device requests? */
                if (all == 0 && cqr->callback == dasd_end_request_cb)
                        continue;
                if (cqr->status == DASD_CQR_IN_IO)
                        device->discipline->term_IO(cqr);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->stopclk = get_clock();
                }
                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        continue;
                }
                /* Rechain request on device request queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->list, &flush_queue);
        }
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of flushed requests */
        list_for_each_safe(l, n, &flush_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                if (cqr->callback != NULL)
                        (cqr->callback)(cqr, cqr->callback_data);
        }
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        atomic_set (&device->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Check expire time of first request on the ccw queue. */
        __dasd_check_expire(device);
        /* Finish off requests on ccw queue */
        __dasd_process_ccw_queue(device, &final_queue);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of requests with final status */
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                list_del(&cqr->list);
                if (cqr->callback != NULL)
                        (cqr->callback)(cqr, cqr->callback_data);
        }
        spin_lock_irq(&device->request_queue_lock);
        spin_lock(get_ccwdev_lock(device->cdev));
        /* Get new request from the block device request queue */
        __dasd_process_blk_queue(device);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_start_head(device);
        spin_unlock(get_ccwdev_lock(device->cdev));
        spin_unlock_irq(&device->request_queue_lock);
        dasd_put_device(device);
}
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
        /* Protect against rescheduling. */
        if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
        struct dasd_device *device;
        unsigned long flags;

        device = req->device;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        req->status = DASD_CQR_QUEUED;
        req->device = device;
        list_add(&req->list, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
        struct dasd_device *device;
        unsigned long flags;

        device = req->device;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        req->status = DASD_CQR_QUEUED;
        req->device = device;
        list_add_tail(&req->list, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}
/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(wait_q, _wait_for_wakeup(cqr));

        /* Request status is either done or failed. */
        rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
        return rc;
}
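
/*
 * A minimal synchronous-I/O sketch built on the helpers above, for
 * illustration only (not part of the original driver). It assumes a
 * request already allocated and filled in by a discipline, e.g. via
 * dasd_smalloc_request() as sketched earlier.
 */
#if 0
static int
dasd_example_sync_io(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
        int rc;

        /* queue the request and sleep until it is done or failed */
        rc = dasd_sleep_on(cqr);
        if (rc == -EIO)
                DEV_MESSAGE(KERN_ERR, device, "%s",
                            "example request failed");
        dasd_sfree_request(cqr, device);
        return rc;
}
#endif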
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc, finished;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        finished = 0;
        while (!finished) {
                rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
                if (rc != -ERESTARTSYS) {
                        /* Request status is either done or failed. */
                        rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
                        break;
                }
                spin_lock_irq(get_ccwdev_lock(device->cdev));
                if (cqr->status == DASD_CQR_IN_IO &&
                    device->discipline->term_IO(cqr) == 0) {
                        list_del(&cqr->list);
                        finished = 1;
                }
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
        }
        return rc;
}
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        rc = device->discipline->term_IO(cqr);
        if (rc == 0) {
                /* termination successful */
                cqr->status = DASD_CQR_QUEUED;
                cqr->startclk = cqr->stopclk = 0;
                cqr->starttime = 0;
        }
        return rc;
}
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = _dasd_term_running_cqr(device);
        if (rc) {
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return rc;
        }

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(wait_q, _wait_for_wakeup(cqr));

        /* Request status is either done or failed. */
        rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
        return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->device;
        unsigned long flags;
        int rc;

        rc = 0;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to failed */
                cqr->status = DASD_CQR_FAILED;
                break;
        case DASD_CQR_IN_IO:
                /* request in IO - terminate IO and release again */
                if (device->discipline->term_IO(cqr) != 0)
                        /* what to do if unable to terminate ??????
                           e.g. not _IN_IO */
                        cqr->status = DASD_CQR_FAILED;
                cqr->stopclk = get_clock();
                rc = 1;
                break;
        case DASD_CQR_DONE:
        case DASD_CQR_FAILED:
                /* already finished - do nothing */
                break;
        default:
                DEV_MESSAGE(KERN_ALERT, device,
                            "invalid status %02x in request",
                            cqr->status);
                BUG();
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_bh(device);
        return rc;
}
/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(request_queue_t * queue)
{
        struct dasd_device *device;

        device = (struct dasd_device *) queue->queuedata;
        spin_lock(get_ccwdev_lock(device->cdev));
        /* Get new request from the block device request queue */
        __dasd_process_blk_queue(device);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_start_head(device);
        spin_unlock(get_ccwdev_lock(device->cdev));
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device * device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Set up request queue parameters (sector size, transfer limits) for an
 * already allocated queue.
 */
static void
dasd_setup_queue(struct dasd_device * device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, 1);
}
/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device * device)
{
	if (device->request_queue) {
		blk_cleanup_queue(device->request_queue);
		device->request_queue = NULL;
	}
}
/*
 * Flush requests on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device * device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	spin_lock_irq(&device->request_queue_lock);
	while (!list_empty(&device->request_queue->queue_head)) {
		req = elv_next_request(device->request_queue);
		if (req == NULL)
			break;
		dasd_end_request(req, 0);
		blkdev_dequeue_request(req);
	}
	spin_unlock_irq(&device->request_queue_lock);
}
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state < DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}
static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
};
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_ioctl_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	devfs_remove("dasd");
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
/*
 * SECTION: common functions for ccw_driver use
 */

/* Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone. */
int
dasd_generic_probe (struct ccw_device *cdev,
		    struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
	} else {
		cdev->handler = &dasd_int_handler;
	}

	return ret;
}
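/*
 * Hedged example of the discipline side, which lives in dasd_eckd.c /
 * dasd_fba.c rather than in this file: a discipline's ccw probe routine
 * typically just forwards to dasd_generic_probe() with its own
 * discipline structure (here assumed to be dasd_eckd_discipline).
 */
#if 0
static int
dasd_eckd_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_eckd_discipline);
}
#endif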
/* This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload. */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
/* Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs. */
int
dasd_generic_set_online (struct ccw_device *cdev,
			 struct dasd_discipline *discipline)
{
	struct dasd_device *device;
	int rc;

	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk (KERN_WARNING
				"dasd_generic couldn't online device %s "
				"- discipline DIAG not available\n",
				cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk (KERN_WARNING
			"dasd_generic couldn't online device %s "
			"with discipline %s rc=%i\n",
			cdev->dev.bus_id, discipline->name, rc);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk (KERN_WARNING
			"dasd_generic discipline not found for %s\n",
			cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
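/*
 * Hedged companion sketch, again from the discipline side: the ccw
 * set_online callback is assumed to forward to dasd_generic_set_online()
 * with the discipline structure, mirroring the probe wrapper above.
 */
#if 0
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
#endif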
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk (KERN_WARNING "Can't offline dasd device with open"
			" count = %i.\n",
			atomic_read(&device->open_count));
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
			dasd_schedule_bh(device);
		} else {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
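/*
 * Hedged sketch of how a discipline is assumed to wire these generic
 * helpers into its struct ccw_driver (cf. dasd_eckd.c); dasd_eckd_ids,
 * dasd_eckd_probe and dasd_eckd_set_online are the discipline's own
 * definitions and not part of this file.
 */
#if 0
static struct ccw_driver dasd_eckd_driver = {
	.name		= "dasd-eckd",
	.owner		= THIS_MODULE,
	.ids		= dasd_eckd_ids,
	.probe		= dasd_eckd_probe,
	.remove		= dasd_generic_remove,
	.set_online	= dasd_eckd_set_online,
	.set_offline	= dasd_generic_set_offline,
	.notify		= dasd_generic_notify,
};
#endif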
/*
 * Automatically online either all dasd devices (dasd_autodetect) or
 * all devices specified with dasd= parameters.
 */
static int
__dasd_auto_online(struct device *dev, void *data)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
		ccw_device_set_online(cdev);
	return 0;
}

void
dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
{
	struct device_driver *drv;

	drv = get_driver(&dasd_discipline_driver->driver);
	driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
	put_driver(drv);
}
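/*
 * Hedged usage sketch: a discipline's module init is assumed to register
 * its ccw_driver and then trigger the auto-online pass, so that devices
 * named on the dasd= parameter (or all of them, with dasd_autodetect)
 * come up without user interaction.
 */
#if 0
static int __init
dasd_eckd_init(void)
{
	int ret;

	ret = ccw_driver_register(&dasd_eckd_driver);
	if (ret == 0)
		dasd_generic_auto_online(&dasd_eckd_driver);
	return ret;
}
#endif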
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_EMERG);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = devfs_mk_dir("dasd");
	if (rc)
		goto failed;
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_ioctl_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-indent-level: 4
 * c-brace-imaginary-offset: 0
 * c-brace-offset: -4
 * c-argdecl-indent: 4
 * c-label-offset: -4
 * c-continued-statement-offset: 4
 * c-continued-brace-offset: 0
 * indent-tabs-mode: 1
 * tab-width: 8
 * End:
 */