/* device_fsm.c */
/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 * Copyright IBM Corp. 2002,2008
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"
  23. static int timeout_log_enabled;
  24. static int __init ccw_timeout_log_setup(char *unused)
  25. {
  26. timeout_log_enabled = 1;
  27. return 1;
  28. }
  29. __setup("ccw_timeout_log", ccw_timeout_log_setup);
/*
 * Dump diagnostic information about a ccw device that ran into a timeout:
 * the current orb, the last channel program, the schib and the internal
 * device flags. Only called when the "ccw_timeout_log" parameter is set.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch the current subchannel information block for the dump. */
	cc = stsch_err(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		/* Transport mode: dump the last tcw instead of a ccw chain. */
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		/* Distinguish cio-internal channel programs (basic sense /
		 * sense id) from driver-issued ones. */
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
  80. /*
  81. * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
  82. */
  83. static void
  84. ccw_device_timeout(unsigned long data)
  85. {
  86. struct ccw_device *cdev;
  87. cdev = (struct ccw_device *) data;
  88. spin_lock_irq(cdev->ccwlock);
  89. if (timeout_log_enabled)
  90. ccw_timeout_log(cdev);
  91. dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
  92. spin_unlock_irq(cdev->ccwlock);
  93. }
  94. /*
  95. * Set timeout
  96. */
  97. void
  98. ccw_device_set_timeout(struct ccw_device *cdev, int expires)
  99. {
  100. if (expires == 0) {
  101. del_timer(&cdev->private->timer);
  102. return;
  103. }
  104. if (timer_pending(&cdev->private->timer)) {
  105. if (mod_timer(&cdev->private->timer, jiffies + expires))
  106. return;
  107. }
  108. cdev->private->timer.function = ccw_device_timeout;
  109. cdev->private->timer.data = (unsigned long) cdev;
  110. cdev->private->timer.expires = jiffies + expires;
  111. add_timer(&cdev->private->timer);
  112. }
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* cancel is a synchronous instruction - only valid in
		 * command mode. */
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* cio_halt() returning 0 means an interrupt is now
			 * expected - report -EBUSY to the caller. */
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* Function was unsuccessful */
	CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
	return -EIO;
}
  166. void ccw_device_update_sense_data(struct ccw_device *cdev)
  167. {
  168. memset(&cdev->id, 0, sizeof(cdev->id));
  169. cdev->id.cu_type = cdev->private->senseid.cu_type;
  170. cdev->id.cu_model = cdev->private->senseid.cu_model;
  171. cdev->id.dev_type = cdev->private->senseid.dev_type;
  172. cdev->id.dev_model = cdev->private->senseid.dev_model;
  173. }
  174. int ccw_device_test_sense_data(struct ccw_device *cdev)
  175. {
  176. return cdev->id.cu_type == cdev->private->senseid.cu_type &&
  177. cdev->id.cu_model == cdev->private->senseid.cu_model &&
  178. cdev->id.dev_type == cdev->private->senseid.dev_type &&
  179. cdev->id.dev_model == cdev->private->senseid.dev_model;
  180. }
  181. /*
  182. * The machine won't give us any notification by machine check if a chpid has
  183. * been varied online on the SE so we have to find out by magic (i. e. driving
  184. * the channel subsystem to device selection and updating our path masks).
  185. */
  186. static void
  187. __recover_lost_chpids(struct subchannel *sch, int old_lpm)
  188. {
  189. int mask, i;
  190. struct chp_id chpid;
  191. chp_id_init(&chpid);
  192. for (i = 0; i<8; i++) {
  193. mask = 0x80 >> i;
  194. if (!(sch->lpm & mask))
  195. continue;
  196. if (old_lpm & mask)
  197. continue;
  198. chpid.id = sch->schib.pmcw.chpid[i];
  199. if (!chp_is_registered(chpid))
  200. css_schedule_eval_all();
  201. }
  202. }
/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);
	/* Recognition is over - the subchannel is no longer needed. */
	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	/* A disconnected device that still could not be recognized stays
	 * disconnected; wake up anyone waiting for recognition to finish. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	/* During resume processing just record the result and notify. */
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device as before - bring it back online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Different device - rebind to a matching driver. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
  274. /*
  275. * Function called from device_id.c after sense id has completed.
  276. */
  277. void
  278. ccw_device_sense_id_done(struct ccw_device *cdev, int err)
  279. {
  280. switch (err) {
  281. case 0:
  282. ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
  283. break;
  284. case -ETIME: /* Sense id stopped by timeout. */
  285. ccw_device_recog_done(cdev, DEV_STATE_BOXED);
  286. break;
  287. default:
  288. ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
  289. break;
  290. }
  291. }
  292. /**
  293. * ccw_device_notify() - inform the device's driver about an event
  294. * @cdev: device for which an event occured
  295. * @event: event that occurred
  296. *
  297. * Returns:
  298. * -%EINVAL if the device is offline or has no driver.
  299. * -%EOPNOTSUPP if the device's driver has no notifier registered.
  300. * %NOTIFY_OK if the driver wants to keep the device.
  301. * %NOTIFY_BAD if the driver doesn't want to keep the device.
  302. */
  303. int ccw_device_notify(struct ccw_device *cdev, int event)
  304. {
  305. int ret = -EINVAL;
  306. if (!cdev->drv)
  307. goto out;
  308. if (!cdev->online)
  309. goto out;
  310. CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
  311. cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
  312. event);
  313. if (!cdev->drv->notify) {
  314. ret = -EOPNOTSUPP;
  315. goto out;
  316. }
  317. if (cdev->drv->notify(cdev, event))
  318. ret = NOTIFY_OK;
  319. else
  320. ret = NOTIFY_BAD;
  321. out:
  322. return ret;
  323. }
  324. static void ccw_device_oper_notify(struct ccw_device *cdev)
  325. {
  326. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  327. if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
  328. /* Reenable channel measurements, if needed. */
  329. ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
  330. /* Save indication for new paths. */
  331. cdev->private->path_new_mask = sch->vpm;
  332. return;
  333. }
  334. /* Driver doesn't want device back. */
  335. ccw_device_set_notoper(cdev);
  336. ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
  337. }
/*
 * Finished with online/offline processing.
 * Records the final device state, notifies the driver about boxed /
 * gone / disconnected conditions and wakes up waiters.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	/* Only an online device keeps its subchannel enabled. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		/* Unregister if the driver refuses to keep a boxed device. */
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	/* Deliver a pending oper notification (set during recognition). */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
  389. /*
  390. * Start device recognition.
  391. */
  392. void ccw_device_recognition(struct ccw_device *cdev)
  393. {
  394. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  395. /*
  396. * We used to start here with a sense pgid to find out whether a device
  397. * is locked by someone else. Unfortunately, the sense pgid command
  398. * code has other meanings on devices predating the path grouping
  399. * algorithm, so we start with sense id and box the device after an
  400. * timeout (or if sense pgid during path verification detects the device
  401. * is locked, as may happen on newer devices).
  402. */
  403. cdev->private->flags.recog_done = 0;
  404. cdev->private->state = DEV_STATE_SENSE_ID;
  405. if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
  406. ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
  407. return;
  408. }
  409. ccw_device_sense_id_start(cdev);
  410. }
  411. /*
  412. * Handle events for states that use the ccw request infrastructure.
  413. */
  414. static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
  415. {
  416. switch (e) {
  417. case DEV_EVENT_NOTOPER:
  418. ccw_request_notoper(cdev);
  419. break;
  420. case DEV_EVENT_INTERRUPT:
  421. ccw_request_handler(cdev);
  422. break;
  423. case DEV_EVENT_TIMEOUT:
  424. ccw_request_timeout(cdev);
  425. break;
  426. default:
  427. break;
  428. }
  429. }
  430. static void ccw_device_report_path_events(struct ccw_device *cdev)
  431. {
  432. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  433. int path_event[8];
  434. int chp, mask;
  435. for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
  436. path_event[chp] = PE_NONE;
  437. if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
  438. path_event[chp] |= PE_PATH_GONE;
  439. if (mask & cdev->private->path_new_mask & sch->vpm)
  440. path_event[chp] |= PE_PATH_AVAILABLE;
  441. if (mask & cdev->private->pgid_reset_mask & sch->vpm)
  442. path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
  443. }
  444. if (cdev->online && cdev->drv->path_event)
  445. cdev->drv->path_event(cdev, path_event);
  446. }
  447. static void ccw_device_reset_path_events(struct ccw_device *cdev)
  448. {
  449. cdev->private->path_gone_mask = 0;
  450. cdev->private->path_new_mask = 0;
  451. cdev->private->pgid_reset_mask = 0;
  452. }
/*
 * Called when path verification has finished with result @err.
 * Translates the result into the final device state, delivers a fake irb
 * to drivers that started i/o while verification was pending, and reports
 * accumulated path events.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			/* Pretend the start function got cc 1 / status
			 * pending, so the driver retries its request. */
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cmd.cc = 1;
			cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.cmd.stctl =
				SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}
  509. /*
  510. * Get device online.
  511. */
  512. int
  513. ccw_device_online(struct ccw_device *cdev)
  514. {
  515. struct subchannel *sch;
  516. int ret;
  517. if ((cdev->private->state != DEV_STATE_OFFLINE) &&
  518. (cdev->private->state != DEV_STATE_BOXED))
  519. return -EINVAL;
  520. sch = to_subchannel(cdev->dev.parent);
  521. ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
  522. if (ret != 0) {
  523. /* Couldn't enable the subchannel for i/o. Sick device. */
  524. if (ret == -ENODEV)
  525. dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
  526. return ret;
  527. }
  528. /* Start initial path verification. */
  529. cdev->private->state = DEV_STATE_VERIFY;
  530. ccw_device_verify_start(cdev);
  531. return 0;
  532. }
  533. void
  534. ccw_device_disband_done(struct ccw_device *cdev, int err)
  535. {
  536. switch (err) {
  537. case 0:
  538. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  539. break;
  540. case -ETIME:
  541. ccw_device_done(cdev, DEV_STATE_BOXED);
  542. break;
  543. default:
  544. cdev->private->flags.donotify = 0;
  545. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  546. break;
  547. }
  548. }
  549. /*
  550. * Shutdown device.
  551. */
  552. int
  553. ccw_device_offline(struct ccw_device *cdev)
  554. {
  555. struct subchannel *sch;
  556. /* Allow ccw_device_offline while disconnected. */
  557. if (cdev->private->state == DEV_STATE_DISCONNECTED ||
  558. cdev->private->state == DEV_STATE_NOT_OPER) {
  559. cdev->private->flags.donotify = 0;
  560. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  561. return 0;
  562. }
  563. if (cdev->private->state == DEV_STATE_BOXED) {
  564. ccw_device_done(cdev, DEV_STATE_BOXED);
  565. return 0;
  566. }
  567. if (ccw_device_is_orphan(cdev)) {
  568. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  569. return 0;
  570. }
  571. sch = to_subchannel(cdev->dev.parent);
  572. if (cio_update_schib(sch))
  573. return -ENODEV;
  574. if (scsw_actl(&sch->schib.scsw) != 0)
  575. return -EBUSY;
  576. if (cdev->private->state != DEV_STATE_ONLINE)
  577. return -EINVAL;
  578. /* Are we doing path grouping? */
  579. if (!cdev->private->flags.pgroup) {
  580. /* No, set state offline immediately. */
  581. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  582. return 0;
  583. }
  584. /* Start Set Path Group commands. */
  585. cdev->private->state = DEV_STATE_DISBAND_PGID;
  586. ccw_device_disband_start(cdev);
  587. return 0;
  588. }
  589. /*
  590. * Handle not operational event in non-special state.
  591. */
  592. static void ccw_device_generic_notoper(struct ccw_device *cdev,
  593. enum dev_event dev_event)
  594. {
  595. if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
  596. ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
  597. else
  598. ccw_device_set_disconnected(cdev);
  599. }
  600. /*
  601. * Handle path verification event in offline state.
  602. */
  603. static void ccw_device_offline_verify(struct ccw_device *cdev,
  604. enum dev_event dev_event)
  605. {
  606. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  607. css_schedule_eval(sch->schid);
  608. }
/*
 * Handle path verification event.
 * Starts path verification immediately if the device is idle, otherwise
 * records the request via the doverify flag for later.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* While waiting for basic sense, just note the request. */
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}

	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
  644. /*
  645. * Handle path verification event in boxed state.
  646. */
  647. static void ccw_device_boxed_verify(struct ccw_device *cdev,
  648. enum dev_event dev_event)
  649. {
  650. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  651. if (cdev->online) {
  652. if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
  653. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  654. else
  655. ccw_device_online_verify(cdev, dev_event);
  656. } else
  657. css_schedule_eval(sch->schid);
  658. }
/*
 * Got an interrupt for a normal io (state online).
 * Handles unsolicited interrupts, starts basic sense when a unit check
 * without concurrent sense data arrives, and otherwise accumulates status
 * and calls the driver's interrupt handler.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = (struct irb *)&S390_lowcore.irb;
	is_cmd = !scsw_is_tm(&irb->scsw);

	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Keep the irb around until sense completes. */
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Deliver unsolicited status with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}

	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
  701. /*
  702. * Got an timeout in online state.
  703. */
  704. static void
  705. ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  706. {
  707. int ret;
  708. ccw_device_set_timeout(cdev, 0);
  709. cdev->private->iretry = 255;
  710. ret = ccw_device_cancel_halt_clear(cdev);
  711. if (ret == -EBUSY) {
  712. ccw_device_set_timeout(cdev, 3*HZ);
  713. cdev->private->state = DEV_STATE_TIMEOUT_KILL;
  714. return;
  715. }
  716. if (ret)
  717. dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
  718. else if (cdev->handler)
  719. cdev->handler(cdev, cdev->private->intparm,
  720. ERR_PTR(-ETIMEDOUT));
  721. }
/*
 * Got an interrupt for a basic sense.
 * Accumulates the sense data, retries the sense if necessary and finally
 * returns to the online state and calls the driver's handler.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *)&S390_lowcore.irb;
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
  774. static void
  775. ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
  776. {
  777. struct subchannel *sch;
  778. sch = to_subchannel(cdev->dev.parent);
  779. ccw_device_set_timeout(cdev, 0);
  780. /* Start delayed path verification. */
  781. ccw_device_online_verify(cdev, 0);
  782. /* OK, i/o is dead now. Call interrupt handler. */
  783. if (cdev->handler)
  784. cdev->handler(cdev, cdev->private->intparm,
  785. ERR_PTR(-EIO));
  786. }
  787. static void
  788. ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  789. {
  790. int ret;
  791. ret = ccw_device_cancel_halt_clear(cdev);
  792. if (ret == -EBUSY) {
  793. ccw_device_set_timeout(cdev, 3*HZ);
  794. return;
  795. }
  796. /* Start delayed path verification. */
  797. ccw_device_online_verify(cdev, 0);
  798. if (cdev->handler)
  799. cdev->handler(cdev, cdev->private->intparm,
  800. ERR_PTR(-EIO));
  801. }
  802. void ccw_device_kill_io(struct ccw_device *cdev)
  803. {
  804. int ret;
  805. cdev->private->iretry = 255;
  806. ret = ccw_device_cancel_halt_clear(cdev);
  807. if (ret == -EBUSY) {
  808. ccw_device_set_timeout(cdev, 3*HZ);
  809. cdev->private->state = DEV_STATE_TIMEOUT_KILL;
  810. return;
  811. }
  812. /* Start delayed path verification. */
  813. ccw_device_online_verify(cdev, 0);
  814. if (cdev->handler)
  815. cdev->handler(cdev, cdev->private->intparm,
  816. ERR_PTR(-EIO));
  817. }
/*
 * A path verification event arrived while another operation is still
 * in progress.  Just record the request; verification is started once
 * the current task has finished (the flag is checked by the
 * i/o-completion paths, e.g. via ccw_device_call_handler()).
 */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}
  824. static void
  825. ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
  826. {
  827. struct subchannel *sch;
  828. sch = to_subchannel(cdev->dev.parent);
  829. if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
  830. /* Couldn't enable the subchannel for i/o. Sick device. */
  831. return;
  832. cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
  833. ccw_device_sense_id_start(cdev);
  834. }
/*
 * Re-start device recognition for a disconnected device — presumably
 * called when the device may have become reachable again (confirm with
 * callers).  Refreshes the subchannel data, resets the path mask and
 * configuration, then either starts sense-id or, if a different device
 * number now answers on this subchannel, schedules a re-evaluation.
 * No-op unless the device is in state DEV_STATE_DISCONNECTED.
 */
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;
	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
  863. static void ccw_device_disabled_irq(struct ccw_device *cdev,
  864. enum dev_event dev_event)
  865. {
  866. struct subchannel *sch;
  867. sch = to_subchannel(cdev->dev.parent);
  868. /*
  869. * An interrupt in a disabled state means a previous disable was not
  870. * successful - should not happen, but we try to disable again.
  871. */
  872. cio_disable_subchannel(sch);
  873. }
/*
 * State DEV_STATE_CMFCHANGE: retry the pending schib modification for
 * channel measurement, return to the online state and re-dispatch the
 * event that arrived meanwhile so it is handled by the online state.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
/*
 * State DEV_STATE_CMFUPDATE: retry copying the channel-measurement
 * block, return to the online state and re-dispatch the event that
 * arrived meanwhile so it is handled by the online state.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
/*
 * Quiescing finished (interrupt or not-operational event in state
 * DEV_STATE_QUIESCE): stop the timer, mark the device not operational
 * and wake up anybody waiting on the device's wait queue.
 */
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}
  895. static void
  896. ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  897. {
  898. int ret;
  899. ret = ccw_device_cancel_halt_clear(cdev);
  900. if (ret == -EBUSY) {
  901. ccw_device_set_timeout(cdev, HZ/10);
  902. } else {
  903. cdev->private->state = DEV_STATE_NOT_OPER;
  904. wake_up(&cdev->private->wait_q);
  905. }
  906. }
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.  Filling jumptable slots with this instead of NULL
 * keeps event dispatch free of NULL checks.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
 * device statemachine
 *
 * Dispatch table mapping (device state, event) to the handler to run.
 * Rows are the NR_DEV_STATES device states, columns the NR_DEV_EVENTS
 * events; every slot is populated - events that are deliberately
 * ignored in a state point to ccw_device_nop.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};
  1018. EXPORT_SYMBOL_GPL(ccw_device_set_timeout);