/*
 *  drivers/s390/cio/device_ops.c
 *
 *   $Revision: 1.57 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"

int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL are mutually
	 * exclusive.
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}

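/*
 * Illustrative usage (not part of the original file): a driver would
 * typically set its options once when it gets hold of the device,
 * before starting I/O. "mydrv_probe" is a hypothetical name.
 *
 *	static int mydrv_probe(struct ccw_device *cdev)
 *	{
 *		return ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *						    CCWDEV_EARLY_NOTIFICATION);
 *	}
 */
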
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_WAIT4IO &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options (sch, flags);
	if (ret)
		return ret;
	ret = cio_start_key (sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm, __u8 key,
			     unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

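/*
 * Illustrative usage (not part of the original file): starting a single
 * read CCW on any path. "mydrv_read" and "MYDRV_CMD_READ" are
 * hypothetical; real code must hold the subchannel lock around
 * ccw_device_start() and ensure the CCW and the data buffer are 31-bit
 * addressable (e.g. allocated with GFP_DMA).
 *
 *	static int mydrv_read(struct ccw_device *cdev, struct ccw1 *ccw,
 *			      void *buf, int len)
 *	{
 *		ccw->cmd_code = MYDRV_CMD_READ;
 *		ccw->flags = CCW_FLAG_SLI;
 *		ccw->count = len;
 *		ccw->cda = (__u32) __pa(buf);
 *		return ccw_device_start(cdev, ccw, (unsigned long) buf,
 *					0, 0);
 *	}
 */
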
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_WAIT4IO &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * We call the device driver's interrupt handler if one of the
	 * following is true:
	 *  - we received ending status
	 *  - the driver requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - the interrupt was unsolicited
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	return 1;
}

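/*
 * Illustrative (not part of the original file): the handler invoked
 * above has the signature below. "mydrv_irq" is hypothetical; note
 * that the common I/O layer may pass an ERR_PTR() encoded error code
 * in place of a real irb, as ccw_device_wake_up() below also handles.
 *
 *	static void mydrv_irq(struct ccw_device *cdev, unsigned long intparm,
 *			      struct irb *irb)
 *	{
 *		if (IS_ERR(irb))
 *			return;	(e.g. -EIO or -ETIMEDOUT)
 *		if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 *			;	(request identified by intparm completed)
 *	}
 */
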
/*
 * Search for CIW command in extended SenseID data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}

__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return 0;
	else
		return sch->vpm;
}

static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
	if (!ip)
		/* unsolicited interrupt */
		return;

	/* Abuse intparm for error reporting. */
	if (IS_ERR(irb))
		cdev->private->intparm = -EIO;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command reject
		 * or intervention required.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
			 !(irb->ecw[0] &
			   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
		else
			cdev->private->intparm = -EIO;
	} else
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
}

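/*
 * Added note (not in the original): __ccw_device_retry_loop() runs a
 * channel program synchronously. It relies on ccw_device_wake_up()
 * above being installed as the device's interrupt handler, retries on
 * -EBUSY / -EACCES from cio_start() as well as on retryable I/O errors
 * reported through intparm, and must be called with the subchannel
 * lock held.
 */
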
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ret = cio_start (sch, ccw, lpm);
		if ((ret == -EBUSY) || (ret == -EACCES)) {
			/* Try again later. */
			spin_unlock_irq(&sch->lock);
			msleep(10);
			spin_lock_irq(&sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(&sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(&sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(&sch->lock);
		msleep(10);
		spin_lock_irq(&sch->lock);
	} while (1);

	return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   called for online device, lock not held
 */
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rddevch");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	memset(rdc_ccw, 0, sizeof(struct ccw1));
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	ret = set_normalized_cda (rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	clear_normalized_cda (rdc_ccw);
	kfree(rdc_ccw);

	return ret;
}

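/*
 * Illustrative usage (not part of the original file): the caller
 * supplies the RDC buffer; the 64-byte size here is hypothetical and
 * device dependent.
 *
 *	void *rdc_data;
 *	int ret;
 *
 *	rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
 *	if (!rdc_data)
 *		return -ENOMEM;
 *	ret = read_dev_chars(cdev, &rdc_data, 64);
 *	...
 *	kfree(rdc_data);
 */
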
/*
 * Read Configuration data using path mask
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rdconf");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	memset(rcd_ccw, 0, sizeof(struct ccw1));
	rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	memset (rcd_buf, 0, ciw->count);
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa (rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	/*
	 * On success, pass the buffer and its length back to the caller.
	 */
	if (ret) {
		kfree (rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);
	return ret;
}

/*
 * Read Configuration data
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm (cdev, buffer, length, 0);
}

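/*
 * Illustrative usage (not part of the original file): unlike
 * read_dev_chars(), the buffer is allocated here and handed back to
 * the caller, who must kfree() it after use.
 *
 *	void *conf_data;
 *	int conf_len;
 *
 *	if (read_conf_data(cdev, &conf_data, &conf_len) == 0) {
 *		... use conf_len bytes at conf_data ...
 *		kfree(conf_data);
 *	}
 */
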
/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
	void *buf, *buf2;
	unsigned long flags;
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;

	if (cdev->drv && !cdev->private->options.force)
		return -EINVAL;

	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);

	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf2) {
		kfree(buf);
		return -ENOMEM;
	}
	spin_lock_irqsave(&sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	if (ret)
		goto out_unlock;
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
	if (ret) {
		cio_disable_subchannel(sch); //FIXME: return code?
		goto out_unlock;
	}
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(&sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(&sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
		ret = -EIO;
	/* Clear irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
	kfree(buf);
	kfree(buf2);
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}

void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return chsc_get_chp_desc(sch, chp_no);
}

// FIXME: these have to go:
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->irq;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);