/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

static struct timer_list recovery_timer;
static spinlock_t recovery_lock;
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static int io_subchannel_notify(struct subchannel *, int);
static void io_subchannel_verify(struct subchannel *);
static void io_subchannel_ioterm(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);

static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = SUBCHANNEL_TYPE_IO,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.notify = io_subchannel_notify,
	.verify = io_subchannel_verify,
	.termination = io_subchannel_ioterm,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
};

struct workqueue_struct *ccw_device_work;
struct workqueue_struct *ccw_device_notify_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static void recovery_func(unsigned long data);

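/*
 * Bus initialization: set up the wait queue and work queues used for
 * asynchronous device recognition, register the ccw bus type and the
 * I/O subchannel driver, then wait until initial device recognition
 * has finished before returning from the initcall.
 */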
static int __init
init_ccw_bus_type (void)
{
	int ret;

	init_waitqueue_head(&ccw_device_init_wq);
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ccw_device_work = create_singlethread_workqueue("cio");
	if (!ccw_device_work)
		return -ENOMEM; /* FIXME: better errno ? */

	ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
	if (!ccw_device_notify_work) {
		ret = -ENOMEM; /* FIXME: better errno ? */
		goto out_err;
	}

	slow_path_wq = create_singlethread_workqueue("kslowcrw");
	if (!slow_path_wq) {
		ret = -ENOMEM; /* FIXME: better errno ? */
		goto out_err;
	}

	if ((ret = bus_register (&ccw_bus_type)))
		goto out_err;

	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		goto out_err;

	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);

	return 0;
out_err:
	if (ccw_device_work)
		destroy_workqueue(ccw_device_work);
	if (ccw_device_notify_work)
		destroy_workqueue(ccw_device_notify_work);
	if (slow_path_wq)
		destroy_workqueue(slow_path_wq);
	return ret;
}

static void __exit
cleanup_ccw_bus_type (void)
{
	css_driver_unregister(&io_subchannel_driver);
	bus_unregister(&ccw_bus_type);
	destroy_workqueue(ccw_device_notify_work);
	destroy_workqueue(ccw_device_work);
}

subsys_initcall(init_ccw_bus_type);
module_exit(cleanup_ccw_bus_type);

/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}

static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (test_and_clear_bit(1, &cdev->private->registered))
		device_del(&cdev->dev);
}

static void ccw_device_remove_orphan_cb(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
}

static void ccw_device_remove_sch_cb(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&sch->dev);
}

static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	unsigned long flags;
	int rc;

	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	if (ccw_device_is_orphan(cdev)) {
		/*
		 * Deregister ccw device.
		 * Unfortunately, we cannot do this directly from the
		 * attribute method.
		 */
		spin_lock_irqsave(cdev->ccwlock, flags);
		cdev->private->state = DEV_STATE_NOT_OPER;
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		rc = device_schedule_callback(&cdev->dev,
					      ccw_device_remove_orphan_cb);
		if (rc)
			CIO_MSG_EVENT(2, "Couldn't unregister orphan "
				      "0.%x.%04x\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		return;
	}
	/* Deregister subchannel, which will kill the ccw device. */
	rc = device_schedule_callback(cdev->dev.parent,
				      ccw_device_remove_sch_cb);
	if (rc)
		CIO_MSG_EVENT(2, "Couldn't unregister disconnected device "
			      "0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
}

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	if (ret == -ENODEV) {
		if (cdev->private->state != DEV_STATE_NOT_OPER) {
			cdev->private->state = DEV_STATE_OFFLINE;
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		}
		spin_unlock_irq(cdev->ccwlock);
		return ret;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->online = 1;
	}
	return ret;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(2, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		return ret;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -ENODEV;
	if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
		cdev->online = 1;
		return 0;
	}
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else
		CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
	return (ret == 0) ? -ENODEV : ret;
}

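/*
 * Handlers for the sysfs 'online' attribute: writing 0 takes the device
 * offline (or throws away a disconnected device), writing 1 triggers
 * device recognition if necessary and brings the device online, and
 * writing "force" additionally attempts ccw_device_stlck() on a boxed
 * device before retrying recognition and online processing.
 */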
static void online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED)
		ccw_device_remove_disconnected(cdev);
	else if (cdev->drv && cdev->drv->set_offline)
		ccw_device_set_offline(cdev);
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	int ret;

	/* Do device recognition, if needed. */
	if (cdev->id.cu_type == 0) {
		ret = ccw_device_recognition(cdev);
		if (ret) {
			CIO_MSG_EVENT(0, "Couldn't start recognition "
				      "for device 0.%x.%04x (ret=%d)\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			return ret;
		}
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
	}
	if (cdev->drv && cdev->drv->set_online)
		ccw_device_set_online(cdev);
	return 0;
}

static void online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret)
		return;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret) {
			dev_warn(&cdev->dev,
				 "ccw_device_stlck returned %d!\n", ret);
			return;
		}
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		online_store_recog_and_online(cdev);
	}
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int i, force;
	char *tmp;

	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
	} else {
		force = 0;
		i = simple_strtoul(buf, &tmp, 16);
	}
	switch (i) {
	case 0:
		online_store_handle_offline(cdev);
		break;
	case 1:
		online_store_handle_online(cdev, force);
		break;
	default:
		count = -EINVAL;
	}
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return count;
}

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
extern struct device_attribute dev_attr_cmb_enable;
static DEVICE_ATTR(availability, 0444, available_show, NULL);

static struct attribute * subch_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

struct attribute_group *subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;

	if ((ret = device_add(dev)))
		return ret;

	set_bit(1, &cdev->private->registered);
	return ret;
}

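/*
 * Lookup helpers: find an existing ccw_device for a given dev_id, either
 * among the disconnected devices on the ccw bus (excluding @sibling) or
 * among the devices parked under the pseudo subchannel ("orphanage").
 */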
struct match_data {
	struct ccw_dev_id dev_id;
	struct ccw_device * sibling;
};

static int
match_devno(struct device * dev, void * data)
{
	struct match_data * d = data;
	struct ccw_device * cdev;

	cdev = to_ccwdev(dev);
	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
	    !ccw_device_is_orphan(cdev) &&
	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
	    (cdev != d->sibling))
		return 1;
	return 0;
}

static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
						     struct ccw_device *sibling)
{
	struct device *dev;
	struct match_data data;

	data.dev_id = *dev_id;
	data.sibling = sibling;
	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);

	return dev ? to_ccwdev(dev) : NULL;
}

static int match_orphan(struct device *dev, void *data)
{
	struct ccw_dev_id *dev_id;
	struct ccw_device *cdev;

	dev_id = data;
	cdev = to_ccwdev(dev);
	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

static struct ccw_device *
get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
			      struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
				match_orphan);

	return dev ? to_ccwdev(dev) : NULL;
}

static void
ccw_device_add_changed(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	if (device_add(&cdev->dev)) {
		put_device(&cdev->dev);
		return;
	}
	set_bit(1, &cdev->private->registered);
}

void ccw_device_do_unreg_rereg(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);

	ccw_device_unregister(cdev);
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_add_changed);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->kick_work, NULL);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		return -ENODEV;
	}
	return 0;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret) {
			kfree(cdev);
			cdev = ERR_PTR(ret);
		}
	}
	return cdev;
}

static int io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_attach_device(struct subchannel *sch,
			      struct ccw_device *cdev)
{
	css_update_ssd_info(sch);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	device_trigger_reprobe(sch);
	spin_unlock_irq(sch->lock);
}

static void sch_attach_disconnected_device(struct subchannel *sch,
					   struct ccw_device *cdev)
{
	struct subchannel *other_sch;
	int ret;

	other_sch = to_subchannel(get_device(cdev->dev.parent));
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&other_sch->dev);
		return;
	}
	sch_set_cdev(other_sch, NULL);
	/* No need to keep a subchannel without ccw device around. */
	css_sch_device_unregister(other_sch);
	put_device(&other_sch->dev);
	sch_attach_device(sch, cdev);
}

static void sch_attach_orphaned_device(struct subchannel *sch,
				       struct ccw_device *cdev)
{
	int ret;

	/* Try to move the ccw device to its new subchannel. */
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
			      "failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	sch_attach_device(sch, cdev);
}

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	/* Start recognition for the new ccw device. */
	if (io_subchannel_recog(cdev, sch)) {
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		css_sch_device_unregister(sch);
	}
}

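/*
 * Park the ccw device currently associated with this subchannel in the
 * orphanage, then look for the ccw device that matches the subchannel's
 * device number - among the disconnected devices or in the orphanage -
 * and attach it; if none is found, create and recognize a new device.
 */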
void ccw_device_move_to_orphanage(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct ccw_device *replacing_cdev;
	struct subchannel *sch;
	int ret;
	struct channel_subsystem *css;
	struct ccw_dev_id dev_id;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css = to_css(sch->dev.parent);
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;

	/*
	 * Move the orphaned ccw device to the orphanage so the replacing
	 * ccw device can take its place on the subchannel.
	 */
	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	cdev->ccwlock = css->pseudo_subchannel->lock;
	/*
	 * Search for the replacing ccw device
	 * - among the disconnected devices
	 * - in the orphanage
	 */
	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
	if (replacing_cdev) {
		sch_attach_disconnected_device(sch, replacing_cdev);
		return;
	}
	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
	if (replacing_cdev) {
		sch_attach_orphaned_device(sch, replacing_cdev);
		return;
	}
	sch_create_and_recog_new_device(sch);
}

/*
 * Register recognized device.
 */
static void
io_subchannel_register(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (klist_node_attached(&cdev->dev.knode_parent)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(2, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	sch->dev.uevent_suppress = 0;
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&cdev->dev);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		kfree (cdev->private);
		kfree (cdev);
		put_device(&sch->dev);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return;
	}
	put_device(&cdev->dev);
out:
	cdev->private->flags.recog_done = 1;
	put_device(&sch->dev);
	wake_up(&cdev->private->wait_q);
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&cdev->dev);
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		if (!get_device(&cdev->dev))
			break;
		sch = to_subchannel(cdev->dev.parent);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		if (!get_device(&cdev->dev))
			break;
		PREPARE_WORK(&cdev->private->kick_work,
			     io_subchannel_register);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		break;
	}
}

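/*
 * Set up the ccw device private data, link device and subchannel, and
 * kick off asynchronous sense ID processing under the subchannel lock.
 * The matching decrement of ccw_device_init_count happens once
 * recognition has finished (or here, if it could not be started).
 */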
static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	sch_set_cdev(sch, cdev);
	sch->driver = &io_subchannel_driver;
	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Set an initial name for the device. */
	snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
		  sch->schid.ssid, sch->schib.pmcw.dev);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (rc) {
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}

static void ccw_device_move_to_sch(struct work_struct *work)
{
	struct ccw_device_private *priv;
	int rc;
	struct subchannel *sch;
	struct ccw_device *cdev;
	struct subchannel *former_parent;

	priv = container_of(work, struct ccw_device_private, kick_work);
	sch = priv->sch;
	cdev = priv->cdev;
	former_parent = ccw_device_is_orphan(cdev) ?
		NULL : to_subchannel(get_device(cdev->dev.parent));
	mutex_lock(&sch->reg_mutex);
	/* Try to move the ccw device to its new subchannel. */
	rc = device_move(&cdev->dev, &sch->dev);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
			      "0.%x.%04x failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schid.sch_no, rc);
		css_sch_device_unregister(sch);
		goto out;
	}
	if (former_parent) {
		spin_lock_irq(former_parent->lock);
		sch_set_cdev(former_parent, NULL);
		spin_unlock_irq(former_parent->lock);
		css_sch_device_unregister(former_parent);
		/* Reset intparm to zeroes. */
		former_parent->schib.pmcw.intparm = 0;
		cio_modify(former_parent);
	}
	sch_attach_device(sch, cdev);
out:
	if (former_parent)
		put_device(&former_parent->dev);
	put_device(&cdev->dev);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(3, "IRQ");
	CIO_TRACE_EVENT(3, sch->dev.bus_id);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

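/*
 * Probe callback for I/O subchannels. Either register an already
 * existing early device (e.g. the console), re-attach a matching
 * disconnected or orphaned device, or allocate a fresh ccw device
 * and start recognition on it.
 */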
static int
io_subchannel_probe (struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;
	unsigned long flags;
	struct ccw_dev_id dev_id;

	cdev = sch_get_cdev(sch);
	if (cdev) {
		/*
		 * This subchannel already has an associated ccw_device.
		 * Register it and exit. This happens for all early
		 * devices, e.g. the console.
		 */
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is,
		 * the reference count needs to be corrected
		 * (see ccw_device_online and css_init_done for the
		 * ugly details).
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	/*
	 * First check if a fitting device may be found amongst the
	 * disconnected devices or in the orphanage.
	 */
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private)
		return -ENOMEM;
	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
	if (!cdev)
		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
						     &dev_id);
	if (cdev) {
		/*
		 * Schedule moving the device until we have a registered
		 * subchannel to move to, and succeed the probe. We can
		 * unregister later again, when the probe is through.
		 */
		cdev->private->sch = sch;
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_sch);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		return 0;
	}
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		kfree(sch->private);
		return PTR_ERR(cdev);
	}
	rc = io_subchannel_recog(cdev, sch);
	if (rc) {
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		kfree(sch->private);
	}
	return rc;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
	kfree(sch->private);
	return 0;
}

static int io_subchannel_notify(struct subchannel *sch, int event)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	if (!cdev->drv)
		return 0;
	if (!cdev->online)
		return 0;
	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_ioterm(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	/* Internal I/O will be retried by the interrupt handler. */
	if (cdev->private->flags.intretry)
		return;
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

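/*
 * Quiesce a subchannel at shutdown: try to disable it, and if I/O is
 * still pending, notify the driver, cancel/halt/clear the operation
 * and wait for the device to reach a final state before disabling.
 */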
static void
io_subchannel_shutdown(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = sch_get_cdev(sch);

	if (cio_is_console(sch->schid))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}

#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}

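/*
 * The console device is brought up long before the regular device
 * infrastructure is available, so recognition and enabling are driven
 * synchronously here via wait_cons_dev() until the device FSM reaches
 * a final state.
 */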
static int
ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;

	/* Attach subchannel private data. */
	sch->private = cio_get_console_priv();
	memset(sch->private, 0, sizeof(struct io_subchannel_private));
	/* Initialize the ccw_device structure. */
	cdev->dev.parent = &sch->dev;
	rc = io_subchannel_recog(cdev, sch);
	if (rc)
		return rc;

	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}

struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
}

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		return ret;
	}

	return 0;
}

static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			//FIXME: we can't fail!
			CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

struct bus_type ccw_bus_type = {
	.name = "ccw",
	.match = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;
	drv->name = cdriver->name;
	drv->owner = cdriver->owner;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

/* Helper func for qdio. */
struct subchannel_id
ccw_device_get_subchannel_id(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	return sch->schid;
}

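/*
 * Path recovery for disconnected devices: the recovery timer retries
 * path verification with increasing delays (3s, 30s, then every 300s,
 * see recovery_delay[]) until no disconnected devices remain.
 */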
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_func(unsigned long data)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(2, "recovery: end\n");
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(2, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);