device_fsm.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281
/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
  10. #include <linux/module.h>
  11. #include <linux/init.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/string.h>
  14. #include <asm/ccwdev.h>
  15. #include <asm/cio.h>
  16. #include <asm/chpid.h>
  17. #include "cio.h"
  18. #include "cio_debug.h"
  19. #include "css.h"
  20. #include "device.h"
  21. #include "chsc.h"
  22. #include "ioasm.h"
  23. #include "chp.h"
/* Non-zero when the "ccw_timeout_log" boot option was given (see below). */
static int timeout_log_enabled;
  25. int
  26. device_is_online(struct subchannel *sch)
  27. {
  28. struct ccw_device *cdev;
  29. cdev = sch_get_cdev(sch);
  30. if (!cdev)
  31. return 0;
  32. return (cdev->private->state == DEV_STATE_ONLINE);
  33. }
  34. int
  35. device_is_disconnected(struct subchannel *sch)
  36. {
  37. struct ccw_device *cdev;
  38. cdev = sch_get_cdev(sch);
  39. if (!cdev)
  40. return 0;
  41. return (cdev->private->state == DEV_STATE_DISCONNECTED ||
  42. cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
  43. }
  44. void
  45. device_set_disconnected(struct subchannel *sch)
  46. {
  47. struct ccw_device *cdev;
  48. cdev = sch_get_cdev(sch);
  49. if (!cdev)
  50. return;
  51. ccw_device_set_timeout(cdev, 0);
  52. cdev->private->flags.fake_irb = 0;
  53. cdev->private->state = DEV_STATE_DISCONNECTED;
  54. if (cdev->online)
  55. ccw_device_schedule_recovery();
  56. }
  57. void device_set_intretry(struct subchannel *sch)
  58. {
  59. struct ccw_device *cdev;
  60. cdev = sch_get_cdev(sch);
  61. if (!cdev)
  62. return;
  63. cdev->private->flags.intretry = 1;
  64. }
  65. int device_trigger_verify(struct subchannel *sch)
  66. {
  67. struct ccw_device *cdev;
  68. cdev = sch_get_cdev(sch);
  69. if (!cdev || !cdev->online)
  70. return -EINVAL;
  71. dev_fsm_event(cdev, DEV_EVENT_VERIFY);
  72. return 0;
  73. }
  74. static int __init ccw_timeout_log_setup(char *unused)
  75. {
  76. timeout_log_enabled = 1;
  77. return 1;
  78. }
  79. __setup("ccw_timeout_log", ccw_timeout_log_setup);
/*
 * Dump diagnostic state for a ccw device whose request timed out:
 * the orb, the bus ids, the path masks, the last channel program,
 * the device state, the current schib and the private flag word.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
        struct schib schib;
        struct subchannel *sch;
        struct io_subchannel_private *private;
        int cc;

        sch = to_subchannel(cdev->dev.parent);
        private = to_io_private(sch);
        /* Fetch the current subchannel information block. */
        cc = stsch(sch->schid, &schib);
        printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
               "device information:\n", get_clock());
        printk(KERN_WARNING "cio: orb:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       &private->orb, sizeof(private->orb), 0);
        printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
        printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
        printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
               "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
        /* Tell internally issued channel programs apart from driver ones. */
        if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
            (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
                printk(KERN_WARNING "cio: last channel program (intern):\n");
        else
                printk(KERN_WARNING "cio: last channel program:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       (void *)(addr_t)private->orb.cpa,
                       sizeof(struct ccw1), 0);
        printk(KERN_WARNING "cio: ccw device state: %d\n",
               cdev->private->state);
        printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
        printk(KERN_WARNING "cio: schib:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       &schib, sizeof(schib), 0);
        printk(KERN_WARNING "cio: ccw device flags:\n");
        print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
                       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
  116. /*
  117. * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
  118. */
  119. static void
  120. ccw_device_timeout(unsigned long data)
  121. {
  122. struct ccw_device *cdev;
  123. cdev = (struct ccw_device *) data;
  124. spin_lock_irq(cdev->ccwlock);
  125. if (timeout_log_enabled)
  126. ccw_timeout_log(cdev);
  127. dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
  128. spin_unlock_irq(cdev->ccwlock);
  129. }
/*
 * (Re-)arm the per-device timer.
 *
 * @expires: timeout in jiffies from now; 0 cancels a pending timer.
 *
 * A pending timer is pushed out via mod_timer(); the timer is fully
 * (re-)initialized and added only if it was not already active.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                /* mod_timer() returns non-zero if the timer was active. */
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}
  149. /* Kill any pending timers after machine check. */
  150. void
  151. device_kill_pending_timer(struct subchannel *sch)
  152. {
  153. struct ccw_device *cdev;
  154. cdev = sch_get_cdev(sch);
  155. if (!cdev)
  156. return;
  157. ccw_device_set_timeout(cdev, 0);
  158. }
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        /* Refresh the schib; a failing stsch means the device is gone. */
        ret = stsch(sch->schid, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena)
                /* Not operational -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        if (ret != -EBUSY)
                                return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear (sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        panic("Can't stop i/o on subchannel.\n");
}
  207. static int
  208. ccw_device_handle_oper(struct ccw_device *cdev)
  209. {
  210. struct subchannel *sch;
  211. sch = to_subchannel(cdev->dev.parent);
  212. cdev->private->flags.recog_done = 1;
  213. /*
  214. * Check if cu type and device type still match. If
  215. * not, it is certainly another device and we have to
  216. * de- and re-register.
  217. */
  218. if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
  219. cdev->id.cu_model != cdev->private->senseid.cu_model ||
  220. cdev->id.dev_type != cdev->private->senseid.dev_type ||
  221. cdev->id.dev_model != cdev->private->senseid.dev_model) {
  222. PREPARE_WORK(&cdev->private->kick_work,
  223. ccw_device_do_unreg_rereg);
  224. queue_work(ccw_device_work, &cdev->private->kick_work);
  225. return 0;
  226. }
  227. cdev->private->flags.donotify = 1;
  228. return 1;
  229. }
  230. /*
  231. * The machine won't give us any notification by machine check if a chpid has
  232. * been varied online on the SE so we have to find out by magic (i. e. driving
  233. * the channel subsystem to device selection and updating our path masks).
  234. */
  235. static void
  236. __recover_lost_chpids(struct subchannel *sch, int old_lpm)
  237. {
  238. int mask, i;
  239. struct chp_id chpid;
  240. chp_id_init(&chpid);
  241. for (i = 0; i<8; i++) {
  242. mask = 0x80 >> i;
  243. if (!(sch->lpm & mask))
  244. continue;
  245. if (old_lpm & mask)
  246. continue;
  247. chpid.id = sch->schib.pmcw.chpid[i];
  248. if (!chp_is_registered(chpid))
  249. css_schedule_eval_all();
  250. }
  251. }
/*
 * Stop device recognition.
 *
 * Called with the recognition result @state (DEV_STATE_NOT_OPER,
 * DEV_STATE_OFFLINE or DEV_STATE_BOXED). Refreshes the path masks,
 * handles re-recognition of disconnected devices and moves the device
 * into its final state.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm, same_dev;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->schid, &sch->schib);
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /* Check since device may again have become not operational. */
        if (!sch->schib.pmcw.dnv)
                state = DEV_STATE_NOT_OPER;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        /* Device stays disconnected, await further events. */
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        same_dev = 0; /* Keep the compiler quiet... */
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
                              "subchannel 0.%x.%04x\n",
                              cdev->private->dev_id.devno,
                              sch->schid.ssid, sch->schid.sch_no);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                        /* Re-recognition: is it still the same device? */
                        same_dev = ccw_device_handle_oper(cdev);
                        notify = 1;
                }
                /* fill out sense information */
                memset(&cdev->id, 0, sizeof(cdev->id));
                cdev->id.cu_type = cdev->private->senseid.cu_type;
                cdev->id.cu_model = cdev->private->senseid.cu_model;
                cdev->id.dev_type = cdev->private->senseid.dev_type;
                cdev->id.dev_model = cdev->private->senseid.dev_model;
                if (notify) {
                        cdev->private->state = DEV_STATE_OFFLINE;
                        if (same_dev) {
                                /* Get device online again. */
                                ccw_device_online(cdev);
                                wake_up(&cdev->private->wait_q);
                        }
                        return;
                }
                /* Issue device info message. */
                CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
                              "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
                              "%04X/%02X\n",
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno,
                              cdev->id.cu_type, cdev->id.cu_model,
                              cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
                              " subchannel 0.%x.%04x\n",
                              cdev->private->dev_id.devno,
                              sch->schid.ssid, sch->schid.sch_no);
                break;
        }
        cdev->private->state = state;
        io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}
  336. /*
  337. * Function called from device_id.c after sense id has completed.
  338. */
  339. void
  340. ccw_device_sense_id_done(struct ccw_device *cdev, int err)
  341. {
  342. switch (err) {
  343. case 0:
  344. ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
  345. break;
  346. case -ETIME: /* Sense id stopped by timeout. */
  347. ccw_device_recog_done(cdev, DEV_STATE_BOXED);
  348. break;
  349. default:
  350. ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
  351. break;
  352. }
  353. }
/*
 * Work queue function: tell the subchannel driver that a previously
 * disconnected device is operational again.
 *
 * The driver's notify callback and cmf_reenable() are called with the
 * ccw lock dropped; if the driver declines (notify returns 0) the
 * device is de-/re-registered instead.
 */
static void
ccw_device_oper_notify(struct work_struct *work)
{
        struct ccw_device_private *priv;
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;
        unsigned long flags;

        priv = container_of(work, struct ccw_device_private, kick_work);
        cdev = priv->cdev;
        spin_lock_irqsave(cdev->ccwlock, flags);
        sch = to_subchannel(cdev->dev.parent);
        if (sch->driver && sch->driver->notify) {
                /* Driver callback runs without the ccw lock held. */
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                ret = sch->driver->notify(sch, CIO_OPER);
                spin_lock_irqsave(cdev->ccwlock, flags);
        } else
                ret = 0;
        if (ret) {
                /* Reenable channel measurements, if needed. */
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                cmf_reenable(cdev);
                spin_lock_irqsave(cdev->ccwlock, flags);
                wake_up(&cdev->private->wait_q);
        }
        spin_unlock_irqrestore(cdev->ccwlock, flags);
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg(work);
}
/*
 * Finished with online/offline processing.
 *
 * Moves the device into @state: stops the timer, disables the
 * subchannel unless the device goes online, resets the accumulated irb,
 * triggers a pending oper notification and, once css init is done,
 * drops the device reference for every state but online.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);
        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
        cdev->private->state = state;
        if (state == DEV_STATE_BOXED)
                CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
                              cdev->private->dev_id.devno, sch->schid.sch_no);
        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device (&cdev->dev);
}
  410. static int cmp_pgid(struct pgid *p1, struct pgid *p2)
  411. {
  412. char *c1;
  413. char *c2;
  414. c1 = (char *)p1;
  415. c2 = (char *)p2;
  416. return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
  417. }
/*
 * Inspect the per-path pgids sensed for the device and derive a single
 * common pgid in slot 0.
 *
 * Paths whose pgid is still in the reset state are skipped. If two
 * valid pgids differ, path grouping is disabled for the device. If no
 * valid pgid was found at all, the css global pgid is used instead.
 */
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
        int i;
        int last;

        last = 0;
        for (i = 0; i < 8; i++) {
                if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
                        /* No PGID yet */
                        continue;
                if (cdev->private->pgid[last].inf.ps.state1 ==
                    SNID_STATE1_RESET) {
                        /* First non-zero PGID */
                        last = i;
                        continue;
                }
                if (cmp_pgid(&cdev->private->pgid[i],
                             &cdev->private->pgid[last]) == 0)
                        /* Non-conflicting PGIDs */
                        continue;
                /* PGID mismatch, can't pathgroup. */
                CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
                              "0.%x.%04x, can't pathgroup\n",
                              cdev->private->dev_id.ssid,
                              cdev->private->dev_id.devno);
                cdev->private->options.pgroup = 0;
                return;
        }
        if (cdev->private->pgid[last].inf.ps.state1 ==
            SNID_STATE1_RESET)
                /* No previous pgid found */
                memcpy(&cdev->private->pgid[0],
                       &channel_subsystems[0]->global_pgid,
                       sizeof(struct pgid));
        else
                /* Use existing pgid */
                memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
                       sizeof(struct pgid));
}
  456. /*
  457. * Function called from device_pgid.c after sense path ground has completed.
  458. */
  459. void
  460. ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
  461. {
  462. struct subchannel *sch;
  463. sch = to_subchannel(cdev->dev.parent);
  464. switch (err) {
  465. case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
  466. cdev->private->options.pgroup = 0;
  467. break;
  468. case 0: /* success */
  469. case -EACCES: /* partial success, some paths not operational */
  470. /* Check if all pgids are equal or 0. */
  471. __ccw_device_get_common_pgid(cdev);
  472. break;
  473. case -ETIME: /* Sense path group id stopped by timeout. */
  474. case -EUSERS: /* device is reserved for someone else. */
  475. ccw_device_done(cdev, DEV_STATE_BOXED);
  476. return;
  477. default:
  478. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  479. return;
  480. }
  481. /* Start Path Group verification. */
  482. cdev->private->state = DEV_STATE_VERIFY;
  483. cdev->private->flags.doverify = 0;
  484. ccw_device_verify_start(cdev);
  485. }
  486. /*
  487. * Start device recognition.
  488. */
  489. int
  490. ccw_device_recognition(struct ccw_device *cdev)
  491. {
  492. struct subchannel *sch;
  493. int ret;
  494. if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
  495. (cdev->private->state != DEV_STATE_BOXED))
  496. return -EINVAL;
  497. sch = to_subchannel(cdev->dev.parent);
  498. ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
  499. if (ret != 0)
  500. /* Couldn't enable the subchannel for i/o. Sick device. */
  501. return ret;
  502. /* After 60s the device recognition is considered to have failed. */
  503. ccw_device_set_timeout(cdev, 60*HZ);
  504. /*
  505. * We used to start here with a sense pgid to find out whether a device
  506. * is locked by someone else. Unfortunately, the sense pgid command
  507. * code has other meanings on devices predating the path grouping
  508. * algorithm, so we start with sense id and box the device after an
  509. * timeout (or if sense pgid during path verification detects the device
  510. * is locked, as may happen on newer devices).
  511. */
  512. cdev->private->flags.recog_done = 0;
  513. cdev->private->state = DEV_STATE_SENSE_ID;
  514. ccw_device_sense_id_start(cdev);
  515. return 0;
  516. }
  517. /*
  518. * Handle timeout in device recognition.
  519. */
  520. static void
  521. ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  522. {
  523. int ret;
  524. ret = ccw_device_cancel_halt_clear(cdev);
  525. switch (ret) {
  526. case 0:
  527. ccw_device_recog_done(cdev, DEV_STATE_BOXED);
  528. break;
  529. case -ENODEV:
  530. ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
  531. break;
  532. default:
  533. ccw_device_set_timeout(cdev, 3*HZ);
  534. }
  535. }
/*
 * Path verification finished with result @err.
 *
 * Updates the schib/lpm, repeats the verification if another one was
 * requested in the meantime, and completes online processing: delivers
 * a pending fake irb on success, boxes or fails the device on error.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /* Update schib - pom may have changed. */
        stsch(sch->schid, &sch->schib);
        /* Update lpm with verified path mask. */
        sch->lpm = sch->vpm;
        /* Repeat path verification? */
        if (cdev->private->flags.doverify) {
                cdev->private->flags.doverify = 0;
                ccw_device_verify_start(cdev);
                return;
        }
        switch (err) {
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                /* fall through - device still goes online. */
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                        cdev->private->irb.scsw.cc = 1;
                        cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
                        cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
                        cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
                                              &cdev->private->irb);
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                break;
        case -ETIME:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                /* Reset oper notify indication after verify error. */
                cdev->private->flags.donotify = 0;
                if (cdev->online) {
                        ccw_device_set_timeout(cdev, 0);
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                } else
                        ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}
  586. /*
  587. * Get device online.
  588. */
  589. int
  590. ccw_device_online(struct ccw_device *cdev)
  591. {
  592. struct subchannel *sch;
  593. int ret;
  594. if ((cdev->private->state != DEV_STATE_OFFLINE) &&
  595. (cdev->private->state != DEV_STATE_BOXED))
  596. return -EINVAL;
  597. sch = to_subchannel(cdev->dev.parent);
  598. if (css_init_done && !get_device(&cdev->dev))
  599. return -ENODEV;
  600. ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
  601. if (ret != 0) {
  602. /* Couldn't enable the subchannel for i/o. Sick device. */
  603. if (ret == -ENODEV)
  604. dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
  605. return ret;
  606. }
  607. /* Do we want to do path grouping? */
  608. if (!cdev->private->options.pgroup) {
  609. /* Start initial path verification. */
  610. cdev->private->state = DEV_STATE_VERIFY;
  611. cdev->private->flags.doverify = 0;
  612. ccw_device_verify_start(cdev);
  613. return 0;
  614. }
  615. /* Do a SensePGID first. */
  616. cdev->private->state = DEV_STATE_SENSE_PGID;
  617. ccw_device_sense_pgid_start(cdev);
  618. return 0;
  619. }
  620. void
  621. ccw_device_disband_done(struct ccw_device *cdev, int err)
  622. {
  623. switch (err) {
  624. case 0:
  625. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  626. break;
  627. case -ETIME:
  628. ccw_device_done(cdev, DEV_STATE_BOXED);
  629. break;
  630. default:
  631. cdev->private->flags.donotify = 0;
  632. dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
  633. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  634. break;
  635. }
  636. }
  637. /*
  638. * Shutdown device.
  639. */
  640. int
  641. ccw_device_offline(struct ccw_device *cdev)
  642. {
  643. struct subchannel *sch;
  644. if (ccw_device_is_orphan(cdev)) {
  645. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  646. return 0;
  647. }
  648. sch = to_subchannel(cdev->dev.parent);
  649. if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
  650. return -ENODEV;
  651. if (cdev->private->state != DEV_STATE_ONLINE) {
  652. if (sch->schib.scsw.actl != 0)
  653. return -EBUSY;
  654. return -EINVAL;
  655. }
  656. if (sch->schib.scsw.actl != 0)
  657. return -EBUSY;
  658. /* Are we doing path grouping? */
  659. if (!cdev->private->options.pgroup) {
  660. /* No, set state offline immediately. */
  661. ccw_device_done(cdev, DEV_STATE_OFFLINE);
  662. return 0;
  663. }
  664. /* Start Set Path Group commands. */
  665. cdev->private->state = DEV_STATE_DISBAND_PGID;
  666. ccw_device_disband_start(cdev);
  667. return 0;
  668. }
  669. /*
  670. * Handle timeout in device online/offline process.
  671. */
  672. static void
  673. ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  674. {
  675. int ret;
  676. ret = ccw_device_cancel_halt_clear(cdev);
  677. switch (ret) {
  678. case 0:
  679. ccw_device_done(cdev, DEV_STATE_BOXED);
  680. break;
  681. case -ENODEV:
  682. ccw_device_done(cdev, DEV_STATE_NOT_OPER);
  683. break;
  684. default:
  685. ccw_device_set_timeout(cdev, 3*HZ);
  686. }
  687. }
/*
 * Handle not oper event in device recognition: finish recognition
 * with the not-oper result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
  696. /*
  697. * Handle not operational event in non-special state.
  698. */
  699. static void ccw_device_generic_notoper(struct ccw_device *cdev,
  700. enum dev_event dev_event)
  701. {
  702. struct subchannel *sch;
  703. cdev->private->state = DEV_STATE_NOT_OPER;
  704. sch = to_subchannel(cdev->dev.parent);
  705. css_schedule_eval(sch->schid);
  706. }
/*
 * Handle path verification event.
 *
 * Starts path verification if the device is idle; otherwise (basic
 * sense in progress, i/o still active, or a final status not yet
 * delivered to the driver) just records that verification has to be
 * done later.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (cdev->private->state == DEV_STATE_W4SENSE) {
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        /*
         * Since we might not just be coming from an interrupt from the
         * subchannel we have to update the schib.
         */
        stsch(sch->schid, &sch->schib);
        if (sch->schib.scsw.actl != 0 ||
            (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        cdev->private->flags.doverify = 0;
        ccw_device_verify_start(cdev);
}
/*
 * Got an interrupt for a normal io (state online).
 *
 * Unsolicited interrupts are either answered with a basic sense (unit
 * check without concurrent sense data) or handed directly to the
 * driver. Solicited status is accumulated and the driver is called
 * once no (further) basic sense is required.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if ((irb->scsw.stctl ==
             (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
            && (!irb->scsw.cc)) {
                if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                    !irb->esw.esw0.erw.cons) {
                        /* Unit check but no sense data. Need basic sense. */
                        if (ccw_device_do_sense(cdev, irb) != 0)
                                goto call_handler_unsol;
                        memcpy(&cdev->private->irb, irb, sizeof(struct irb));
                        cdev->private->state = DEV_STATE_W4SENSE;
                        cdev->private->intparm = 0;
                        return;
                }
call_handler_unsol:
                if (cdev->handler)
                        cdev->handler (cdev, 0, irb);
                if (cdev->private->flags.doverify)
                        ccw_device_online_verify(cdev, 0);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
  782. /*
  783. * Got an timeout in online state.
  784. */
  785. static void
  786. ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  787. {
  788. int ret;
  789. ccw_device_set_timeout(cdev, 0);
  790. ret = ccw_device_cancel_halt_clear(cdev);
  791. if (ret == -EBUSY) {
  792. ccw_device_set_timeout(cdev, 3*HZ);
  793. cdev->private->state = DEV_STATE_TIMEOUT_KILL;
  794. return;
  795. }
  796. if (ret == -ENODEV)
  797. dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
  798. else if (cdev->handler)
  799. cdev->handler(cdev, cdev->private->intparm,
  800. ERR_PTR(-ETIMEDOUT));
  801. }
/*
 * Got an interrupt for a basic sense.
 *
 * Handles unsolicited interrupts during the sense, retries the basic
 * sense after a halt/clear when requested, accumulates the sense data
 * and finally returns the device to the online state and calls the
 * driver.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
            (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
                        CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
                                      "interrupt during w4sense...\n",
                                      cdev->private->dev_id.ssid,
                                      cdev->private->dev_id.devno);
                        if (cdev->handler)
                                cdev->handler (cdev, 0, irb);
                }
                return;
        }
        /*
         * Check if a halt or clear has been issued in the meanwhile. If yes,
         * only deliver the halt/clear interrupt to the device driver as if it
         * had killed the original request.
         */
        if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
                /* Retry Basic Sense if requested. */
                if (cdev->private->flags.intretry) {
                        cdev->private->flags.intretry = 0;
                        ccw_device_do_sense(cdev, irb);
                        return;
                }
                cdev->private->flags.dosense = 0;
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                ccw_device_accumulate_irb(cdev, irb);
                goto call_handler;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
call_handler:
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
  857. static void
  858. ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
  859. {
  860. struct irb *irb;
  861. irb = (struct irb *) __LC_IRB;
  862. /* Accumulate status. We don't do basic sense. */
  863. ccw_device_accumulate_irb(cdev, irb);
  864. /* Remember to clear irb to avoid residuals. */
  865. memset(&cdev->private->irb, 0, sizeof(struct irb));
  866. /* Try to start delayed device verification. */
  867. ccw_device_online_verify(cdev, 0);
  868. /* Note: Don't call handler for cio initiated clear! */
  869. }
  870. static void
  871. ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
  872. {
  873. struct subchannel *sch;
  874. sch = to_subchannel(cdev->dev.parent);
  875. ccw_device_set_timeout(cdev, 0);
  876. /* Start delayed path verification. */
  877. ccw_device_online_verify(cdev, 0);
  878. /* OK, i/o is dead now. Call interrupt handler. */
  879. if (cdev->handler)
  880. cdev->handler(cdev, cdev->private->intparm,
  881. ERR_PTR(-EIO));
  882. }
  883. static void
  884. ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  885. {
  886. int ret;
  887. ret = ccw_device_cancel_halt_clear(cdev);
  888. if (ret == -EBUSY) {
  889. ccw_device_set_timeout(cdev, 3*HZ);
  890. return;
  891. }
  892. /* Start delayed path verification. */
  893. ccw_device_online_verify(cdev, 0);
  894. if (cdev->handler)
  895. cdev->handler(cdev, cdev->private->intparm,
  896. ERR_PTR(-EIO));
  897. }
  898. void device_kill_io(struct subchannel *sch)
  899. {
  900. int ret;
  901. struct ccw_device *cdev;
  902. cdev = sch_get_cdev(sch);
  903. ret = ccw_device_cancel_halt_clear(cdev);
  904. if (ret == -EBUSY) {
  905. ccw_device_set_timeout(cdev, 3*HZ);
  906. cdev->private->state = DEV_STATE_TIMEOUT_KILL;
  907. return;
  908. }
  909. /* Start delayed path verification. */
  910. ccw_device_online_verify(cdev, 0);
  911. if (cdev->handler)
  912. cdev->handler(cdev, cdev->private->intparm,
  913. ERR_PTR(-EIO));
  914. }
  915. static void
  916. ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
  917. {
  918. /* Start verification after current task finished. */
  919. cdev->private->flags.doverify = 1;
  920. }
  921. static void
  922. ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
  923. {
  924. struct irb *irb;
  925. switch (dev_event) {
  926. case DEV_EVENT_INTERRUPT:
  927. irb = (struct irb *) __LC_IRB;
  928. /* Check for unsolicited interrupt. */
  929. if ((irb->scsw.stctl ==
  930. (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
  931. (!irb->scsw.cc))
  932. /* FIXME: we should restart stlck here, but this
  933. * is extremely unlikely ... */
  934. goto out_wakeup;
  935. ccw_device_accumulate_irb(cdev, irb);
  936. /* We don't care about basic sense etc. */
  937. break;
  938. default: /* timeout */
  939. break;
  940. }
  941. out_wakeup:
  942. wake_up(&cdev->private->wait_q);
  943. }
  944. static void
  945. ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
  946. {
  947. struct subchannel *sch;
  948. sch = to_subchannel(cdev->dev.parent);
  949. if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
  950. /* Couldn't enable the subchannel for i/o. Sick device. */
  951. return;
  952. /* After 60s the device recognition is considered to have failed. */
  953. ccw_device_set_timeout(cdev, 60*HZ);
  954. cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
  955. ccw_device_sense_id_start(cdev);
  956. }
/*
 * Refresh subchannel data for a device in DEV_STATE_DISCONNECTED and
 * restart device recognition. If a different device number now shows up
 * on the subchannel, queue a move of the old device to the orphanage
 * instead.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	/* Only disconnected devices are reprobed. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;
	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	/* Device no longer valid on this subchannel - nothing to do. */
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one path available - enable the multipath bit. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}
  991. static void
  992. ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
  993. {
  994. struct subchannel *sch;
  995. sch = to_subchannel(cdev->dev.parent);
  996. /*
  997. * An interrupt in state offline means a previous disable was not
  998. * successful. Try again.
  999. */
  1000. cio_disable_subchannel(sch);
  1001. }
  1002. static void
  1003. ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
  1004. {
  1005. retry_set_schib(cdev);
  1006. cdev->private->state = DEV_STATE_ONLINE;
  1007. dev_fsm_event(cdev, dev_event);
  1008. }
  1009. static void ccw_device_update_cmfblock(struct ccw_device *cdev,
  1010. enum dev_event dev_event)
  1011. {
  1012. cmf_retry_copy_block(cdev);
  1013. cdev->private->state = DEV_STATE_ONLINE;
  1014. dev_fsm_event(cdev, dev_event);
  1015. }
  1016. static void
  1017. ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
  1018. {
  1019. ccw_device_set_timeout(cdev, 0);
  1020. if (dev_event == DEV_EVENT_NOTOPER)
  1021. cdev->private->state = DEV_STATE_NOT_OPER;
  1022. else
  1023. cdev->private->state = DEV_STATE_OFFLINE;
  1024. wake_up(&cdev->private->wait_q);
  1025. }
  1026. static void
  1027. ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
  1028. {
  1029. int ret;
  1030. ret = ccw_device_cancel_halt_clear(cdev);
  1031. switch (ret) {
  1032. case 0:
  1033. cdev->private->state = DEV_STATE_OFFLINE;
  1034. wake_up(&cdev->private->wait_q);
  1035. break;
  1036. case -ENODEV:
  1037. cdev->private->state = DEV_STATE_NOT_OPER;
  1038. wake_up(&cdev->private->wait_q);
  1039. break;
  1040. default:
  1041. ccw_device_set_timeout(cdev, HZ/10);
  1042. }
  1043. }
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Intentionally empty. */
}
/*
 * Bug operation action.
 * Entered for state/event combinations that must never occur; logs the
 * offending state and event, then crashes via BUG().
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
		      "0.%x.%04x\n", cdev->private->state, dev_event,
		      cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	BUG();
}
/*
 * device statemachine
 *
 * Jump table indexed by [current device state][incoming event]; each
 * entry is the action routine the FSM dispatches to for that
 * combination. Impossible combinations map to ccw_device_bug(),
 * ignorable ones to ccw_device_nop().
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};
/* Make ccw_device_set_timeout available to GPL modules. */
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);