/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002,2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
  24. int css_init_done = 0;
  25. static int need_reprobe = 0;
  26. static int max_ssid = 0;
  27. struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
  28. int css_characteristics_avail = 0;
  29. int
  30. for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
  31. {
  32. struct subchannel_id schid;
  33. int ret;
  34. init_subchannel_id(&schid);
  35. ret = -ENODEV;
  36. do {
  37. do {
  38. ret = fn(schid, data);
  39. if (ret)
  40. break;
  41. } while (schid.sch_no++ < __MAX_SUBCHANNEL);
  42. schid.sch_no = 0;
  43. } while (schid.ssid++ < max_ssid);
  44. return ret;
  45. }
  46. struct cb_data {
  47. void *data;
  48. struct idset *set;
  49. int (*fn_known_sch)(struct subchannel *, void *);
  50. int (*fn_unknown_sch)(struct subchannel_id, void *);
  51. };
  52. static int call_fn_known_sch(struct device *dev, void *data)
  53. {
  54. struct subchannel *sch = to_subchannel(dev);
  55. struct cb_data *cb = data;
  56. int rc = 0;
  57. idset_sch_del(cb->set, sch->schid);
  58. if (cb->fn_known_sch)
  59. rc = cb->fn_known_sch(sch, cb->data);
  60. return rc;
  61. }
  62. static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
  63. {
  64. struct cb_data *cb = data;
  65. int rc = 0;
  66. if (idset_sch_contains(cb->set, schid))
  67. rc = cb->fn_unknown_sch(schid, cb->data);
  68. return rc;
  69. }
  70. int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
  71. int (*fn_unknown)(struct subchannel_id,
  72. void *), void *data)
  73. {
  74. struct cb_data cb;
  75. int rc;
  76. cb.set = idset_sch_new();
  77. if (!cb.set)
  78. return -ENOMEM;
  79. idset_fill(cb.set);
  80. cb.data = data;
  81. cb.fn_known_sch = fn_known;
  82. cb.fn_unknown_sch = fn_unknown;
  83. /* Process registered subchannels. */
  84. rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
  85. if (rc)
  86. goto out;
  87. /* Process unregistered subchannels. */
  88. if (fn_unknown)
  89. rc = for_each_subchannel(call_fn_unknown_sch, &cb);
  90. out:
  91. idset_free(cb.set);
  92. return rc;
  93. }
  94. static struct subchannel *
  95. css_alloc_subchannel(struct subchannel_id schid)
  96. {
  97. struct subchannel *sch;
  98. int ret;
  99. sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
  100. if (sch == NULL)
  101. return ERR_PTR(-ENOMEM);
  102. ret = cio_validate_subchannel (sch, schid);
  103. if (ret < 0) {
  104. kfree(sch);
  105. return ERR_PTR(ret);
  106. }
  107. return sch;
  108. }
  109. static void
  110. css_free_subchannel(struct subchannel *sch)
  111. {
  112. if (sch) {
  113. /* Reset intparm to zeroes. */
  114. sch->schib.pmcw.intparm = 0;
  115. cio_modify(sch);
  116. kfree(sch->lock);
  117. kfree(sch);
  118. }
  119. }
  120. static void
  121. css_subchannel_release(struct device *dev)
  122. {
  123. struct subchannel *sch;
  124. sch = to_subchannel(dev);
  125. if (!cio_is_console(sch->schid)) {
  126. kfree(sch->lock);
  127. kfree(sch);
  128. }
  129. }
  130. static int css_sch_device_register(struct subchannel *sch)
  131. {
  132. int ret;
  133. mutex_lock(&sch->reg_mutex);
  134. ret = device_register(&sch->dev);
  135. mutex_unlock(&sch->reg_mutex);
  136. return ret;
  137. }
  138. void css_sch_device_unregister(struct subchannel *sch)
  139. {
  140. mutex_lock(&sch->reg_mutex);
  141. device_unregister(&sch->dev);
  142. mutex_unlock(&sch->reg_mutex);
  143. }
  144. static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
  145. {
  146. int i;
  147. int mask;
  148. memset(ssd, 0, sizeof(struct chsc_ssd_info));
  149. ssd->path_mask = pmcw->pim;
  150. for (i = 0; i < 8; i++) {
  151. mask = 0x80 >> i;
  152. if (pmcw->pim & mask) {
  153. chp_id_init(&ssd->chpid[i]);
  154. ssd->chpid[i].id = pmcw->chpid[i];
  155. }
  156. }
  157. }
  158. static void ssd_register_chpids(struct chsc_ssd_info *ssd)
  159. {
  160. int i;
  161. int mask;
  162. for (i = 0; i < 8; i++) {
  163. mask = 0x80 >> i;
  164. if (ssd->path_mask & mask)
  165. if (!chp_is_registered(ssd->chpid[i]))
  166. chp_new(ssd->chpid[i]);
  167. }
  168. }
  169. void css_update_ssd_info(struct subchannel *sch)
  170. {
  171. int ret;
  172. if (cio_is_console(sch->schid)) {
  173. /* Console is initialized too early for functions requiring
  174. * memory allocation. */
  175. ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
  176. } else {
  177. ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
  178. if (ret)
  179. ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
  180. ssd_register_chpids(&sch->ssd_info);
  181. }
  182. }
  183. static ssize_t type_show(struct device *dev, struct device_attribute *attr,
  184. char *buf)
  185. {
  186. struct subchannel *sch = to_subchannel(dev);
  187. return sprintf(buf, "%01x\n", sch->st);
  188. }
  189. static DEVICE_ATTR(type, 0444, type_show, NULL);
  190. static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
  191. char *buf)
  192. {
  193. struct subchannel *sch = to_subchannel(dev);
  194. return sprintf(buf, "css:t%01X\n", sch->st);
  195. }
  196. static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
  197. static struct attribute *subch_attrs[] = {
  198. &dev_attr_type.attr,
  199. &dev_attr_modalias.attr,
  200. NULL,
  201. };
  202. static struct attribute_group subch_attr_group = {
  203. .attrs = subch_attrs,
  204. };
  205. static struct attribute_group *default_subch_attr_groups[] = {
  206. &subch_attr_group,
  207. NULL,
  208. };
  209. static int css_register_subchannel(struct subchannel *sch)
  210. {
  211. int ret;
  212. /* Initialize the subchannel structure */
  213. sch->dev.parent = &channel_subsystems[0]->device;
  214. sch->dev.bus = &css_bus_type;
  215. sch->dev.release = &css_subchannel_release;
  216. sch->dev.groups = default_subch_attr_groups;
  217. /*
  218. * We don't want to generate uevents for I/O subchannels that don't
  219. * have a working ccw device behind them since they will be
  220. * unregistered before they can be used anyway, so we delay the add
  221. * uevent until after device recognition was successful.
  222. * Note that we suppress the uevent for all subchannel types;
  223. * the subchannel driver can decide itself when it wants to inform
  224. * userspace of its existence.
  225. */
  226. sch->dev.uevent_suppress = 1;
  227. css_update_ssd_info(sch);
  228. /* make it known to the system */
  229. ret = css_sch_device_register(sch);
  230. if (ret) {
  231. CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
  232. sch->schid.ssid, sch->schid.sch_no, ret);
  233. return ret;
  234. }
  235. if (!sch->driver) {
  236. /*
  237. * No driver matched. Generate the uevent now so that
  238. * a fitting driver module may be loaded based on the
  239. * modalias.
  240. */
  241. sch->dev.uevent_suppress = 0;
  242. kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
  243. }
  244. return ret;
  245. }
  246. static int css_probe_device(struct subchannel_id schid)
  247. {
  248. int ret;
  249. struct subchannel *sch;
  250. sch = css_alloc_subchannel(schid);
  251. if (IS_ERR(sch))
  252. return PTR_ERR(sch);
  253. ret = css_register_subchannel(sch);
  254. if (ret)
  255. css_free_subchannel(sch);
  256. return ret;
  257. }
  258. static int
  259. check_subchannel(struct device * dev, void * data)
  260. {
  261. struct subchannel *sch;
  262. struct subchannel_id *schid = data;
  263. sch = to_subchannel(dev);
  264. return schid_equal(&sch->schid, schid);
  265. }
  266. struct subchannel *
  267. get_subchannel_by_schid(struct subchannel_id schid)
  268. {
  269. struct device *dev;
  270. dev = bus_find_device(&css_bus_type, NULL,
  271. &schid, check_subchannel);
  272. return dev ? to_subchannel(dev) : NULL;
  273. }
  274. /**
  275. * css_sch_is_valid() - check if a subchannel is valid
  276. * @schib: subchannel information block for the subchannel
  277. */
  278. int css_sch_is_valid(struct schib *schib)
  279. {
  280. if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
  281. return 0;
  282. return 1;
  283. }
  284. EXPORT_SYMBOL_GPL(css_sch_is_valid);
  285. static int css_get_subchannel_status(struct subchannel *sch)
  286. {
  287. struct schib schib;
  288. if (stsch(sch->schid, &schib))
  289. return CIO_GONE;
  290. if (!css_sch_is_valid(&schib))
  291. return CIO_GONE;
  292. if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
  293. return CIO_REVALIDATE;
  294. if (!sch->lpm)
  295. return CIO_NO_PATH;
  296. return CIO_OPER;
  297. }
  298. static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
  299. {
  300. int event, ret, disc;
  301. unsigned long flags;
  302. enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
  303. spin_lock_irqsave(sch->lock, flags);
  304. disc = device_is_disconnected(sch);
  305. if (disc && slow) {
  306. /* Disconnected devices are evaluated directly only.*/
  307. spin_unlock_irqrestore(sch->lock, flags);
  308. return 0;
  309. }
  310. /* No interrupt after machine check - kill pending timers. */
  311. device_kill_pending_timer(sch);
  312. if (!disc && !slow) {
  313. /* Non-disconnected devices are evaluated on the slow path. */
  314. spin_unlock_irqrestore(sch->lock, flags);
  315. return -EAGAIN;
  316. }
  317. event = css_get_subchannel_status(sch);
  318. CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
  319. sch->schid.ssid, sch->schid.sch_no, event,
  320. disc ? "disconnected" : "normal",
  321. slow ? "slow" : "fast");
  322. /* Analyze subchannel status. */
  323. action = NONE;
  324. switch (event) {
  325. case CIO_NO_PATH:
  326. if (disc) {
  327. /* Check if paths have become available. */
  328. action = REPROBE;
  329. break;
  330. }
  331. /* fall through */
  332. case CIO_GONE:
  333. /* Prevent unwanted effects when opening lock. */
  334. cio_disable_subchannel(sch);
  335. device_set_disconnected(sch);
  336. /* Ask driver what to do with device. */
  337. action = UNREGISTER;
  338. if (sch->driver && sch->driver->notify) {
  339. spin_unlock_irqrestore(sch->lock, flags);
  340. ret = sch->driver->notify(sch, event);
  341. spin_lock_irqsave(sch->lock, flags);
  342. if (ret)
  343. action = NONE;
  344. }
  345. break;
  346. case CIO_REVALIDATE:
  347. /* Device will be removed, so no notify necessary. */
  348. if (disc)
  349. /* Reprobe because immediate unregister might block. */
  350. action = REPROBE;
  351. else
  352. action = UNREGISTER_PROBE;
  353. break;
  354. case CIO_OPER:
  355. if (disc)
  356. /* Get device operational again. */
  357. action = REPROBE;
  358. break;
  359. }
  360. /* Perform action. */
  361. ret = 0;
  362. switch (action) {
  363. case UNREGISTER:
  364. case UNREGISTER_PROBE:
  365. /* Unregister device (will use subchannel lock). */
  366. spin_unlock_irqrestore(sch->lock, flags);
  367. css_sch_device_unregister(sch);
  368. spin_lock_irqsave(sch->lock, flags);
  369. /* Reset intparm to zeroes. */
  370. sch->schib.pmcw.intparm = 0;
  371. cio_modify(sch);
  372. break;
  373. case REPROBE:
  374. device_trigger_reprobe(sch);
  375. break;
  376. default:
  377. break;
  378. }
  379. spin_unlock_irqrestore(sch->lock, flags);
  380. /* Probe if necessary. */
  381. if (action == UNREGISTER_PROBE)
  382. ret = css_probe_device(sch->schid);
  383. return ret;
  384. }
  385. static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
  386. {
  387. struct schib schib;
  388. if (!slow) {
  389. /* Will be done on the slow path. */
  390. return -EAGAIN;
  391. }
  392. if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
  393. /* Unusable - ignore. */
  394. return 0;
  395. }
  396. CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
  397. "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
  398. return css_probe_device(schid);
  399. }
  400. static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
  401. {
  402. struct subchannel *sch;
  403. int ret;
  404. sch = get_subchannel_by_schid(schid);
  405. if (sch) {
  406. ret = css_evaluate_known_subchannel(sch, slow);
  407. put_device(&sch->dev);
  408. } else
  409. ret = css_evaluate_new_subchannel(schid, slow);
  410. if (ret == -EAGAIN)
  411. css_schedule_eval(schid);
  412. }
  413. static struct idset *slow_subchannel_set;
  414. static spinlock_t slow_subchannel_lock;
  415. static int __init slow_subchannel_init(void)
  416. {
  417. spin_lock_init(&slow_subchannel_lock);
  418. slow_subchannel_set = idset_sch_new();
  419. if (!slow_subchannel_set) {
  420. CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
  421. return -ENOMEM;
  422. }
  423. return 0;
  424. }
  425. static int slow_eval_known_fn(struct subchannel *sch, void *data)
  426. {
  427. int eval;
  428. int rc;
  429. spin_lock_irq(&slow_subchannel_lock);
  430. eval = idset_sch_contains(slow_subchannel_set, sch->schid);
  431. idset_sch_del(slow_subchannel_set, sch->schid);
  432. spin_unlock_irq(&slow_subchannel_lock);
  433. if (eval) {
  434. rc = css_evaluate_known_subchannel(sch, 1);
  435. if (rc == -EAGAIN)
  436. css_schedule_eval(sch->schid);
  437. }
  438. return 0;
  439. }
  440. static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
  441. {
  442. int eval;
  443. int rc = 0;
  444. spin_lock_irq(&slow_subchannel_lock);
  445. eval = idset_sch_contains(slow_subchannel_set, schid);
  446. idset_sch_del(slow_subchannel_set, schid);
  447. spin_unlock_irq(&slow_subchannel_lock);
  448. if (eval) {
  449. rc = css_evaluate_new_subchannel(schid, 1);
  450. switch (rc) {
  451. case -EAGAIN:
  452. css_schedule_eval(schid);
  453. rc = 0;
  454. break;
  455. case -ENXIO:
  456. case -ENOMEM:
  457. case -EIO:
  458. /* These should abort looping */
  459. break;
  460. default:
  461. rc = 0;
  462. }
  463. }
  464. return rc;
  465. }
  466. static void css_slow_path_func(struct work_struct *unused)
  467. {
  468. CIO_TRACE_EVENT(4, "slowpath");
  469. for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
  470. NULL);
  471. }
  472. static DECLARE_WORK(slow_path_work, css_slow_path_func);
  473. struct workqueue_struct *slow_path_wq;
  474. void css_schedule_eval(struct subchannel_id schid)
  475. {
  476. unsigned long flags;
  477. spin_lock_irqsave(&slow_subchannel_lock, flags);
  478. idset_sch_add(slow_subchannel_set, schid);
  479. queue_work(slow_path_wq, &slow_path_work);
  480. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  481. }
  482. void css_schedule_eval_all(void)
  483. {
  484. unsigned long flags;
  485. spin_lock_irqsave(&slow_subchannel_lock, flags);
  486. idset_fill(slow_subchannel_set);
  487. queue_work(slow_path_wq, &slow_path_work);
  488. spin_unlock_irqrestore(&slow_subchannel_lock, flags);
  489. }
  490. void css_wait_for_slow_path(void)
  491. {
  492. flush_workqueue(ccw_device_notify_work);
  493. flush_workqueue(slow_path_wq);
  494. }
  495. /* Reprobe subchannel if unregistered. */
  496. static int reprobe_subchannel(struct subchannel_id schid, void *data)
  497. {
  498. int ret;
  499. CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
  500. schid.ssid, schid.sch_no);
  501. if (need_reprobe)
  502. return -EAGAIN;
  503. ret = css_probe_device(schid);
  504. switch (ret) {
  505. case 0:
  506. break;
  507. case -ENXIO:
  508. case -ENOMEM:
  509. case -EIO:
  510. /* These should abort looping */
  511. break;
  512. default:
  513. ret = 0;
  514. }
  515. return ret;
  516. }
  517. /* Work function used to reprobe all unregistered subchannels. */
  518. static void reprobe_all(struct work_struct *unused)
  519. {
  520. int ret;
  521. CIO_MSG_EVENT(4, "reprobe start\n");
  522. need_reprobe = 0;
  523. /* Make sure initial subchannel scan is done. */
  524. wait_event(ccw_device_init_wq,
  525. atomic_read(&ccw_device_init_count) == 0);
  526. ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
  527. CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
  528. need_reprobe);
  529. }
  530. static DECLARE_WORK(css_reprobe_work, reprobe_all);
  531. /* Schedule reprobing of all unregistered subchannels. */
  532. void css_schedule_reprobe(void)
  533. {
  534. need_reprobe = 1;
  535. queue_work(slow_path_wq, &css_reprobe_work);
  536. }
  537. EXPORT_SYMBOL_GPL(css_schedule_reprobe);
  538. /*
  539. * Called from the machine check handler for subchannel report words.
  540. */
  541. void css_process_crw(int rsid1, int rsid2)
  542. {
  543. struct subchannel_id mchk_schid;
  544. CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
  545. rsid1, rsid2);
  546. init_subchannel_id(&mchk_schid);
  547. mchk_schid.sch_no = rsid1;
  548. if (rsid2 != 0)
  549. mchk_schid.ssid = (rsid2 >> 8) & 3;
  550. /*
  551. * Since we are always presented with IPI in the CRW, we have to
  552. * use stsch() to find out if the subchannel in question has come
  553. * or gone.
  554. */
  555. css_evaluate_subchannel(mchk_schid, 0);
  556. }
  557. static int __init
  558. __init_channel_subsystem(struct subchannel_id schid, void *data)
  559. {
  560. struct subchannel *sch;
  561. int ret;
  562. if (cio_is_console(schid))
  563. sch = cio_get_console_subchannel();
  564. else {
  565. sch = css_alloc_subchannel(schid);
  566. if (IS_ERR(sch))
  567. ret = PTR_ERR(sch);
  568. else
  569. ret = 0;
  570. switch (ret) {
  571. case 0:
  572. break;
  573. case -ENOMEM:
  574. panic("Out of memory in init_channel_subsystem\n");
  575. /* -ENXIO: no more subchannels. */
  576. case -ENXIO:
  577. return ret;
  578. /* -EIO: this subchannel set not supported. */
  579. case -EIO:
  580. return ret;
  581. default:
  582. return 0;
  583. }
  584. }
  585. /*
  586. * We register ALL valid subchannels in ioinfo, even those
  587. * that have been present before init_channel_subsystem.
  588. * These subchannels can't have been registered yet (kmalloc
  589. * not working) so we do it now. This is true e.g. for the
  590. * console subchannel.
  591. */
  592. css_register_subchannel(sch);
  593. return 0;
  594. }
  595. static void __init
  596. css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
  597. {
  598. if (css_characteristics_avail && css_general_characteristics.mcss) {
  599. css->global_pgid.pgid_high.ext_cssid.version = 0x80;
  600. css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
  601. } else {
  602. #ifdef CONFIG_SMP
  603. css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
  604. #else
  605. css->global_pgid.pgid_high.cpu_addr = 0;
  606. #endif
  607. }
  608. css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
  609. css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
  610. css->global_pgid.tod_high = tod_high;
  611. }
  612. static void
  613. channel_subsystem_release(struct device *dev)
  614. {
  615. struct channel_subsystem *css;
  616. css = to_css(dev);
  617. mutex_destroy(&css->mutex);
  618. kfree(css);
  619. }
  620. static ssize_t
  621. css_cm_enable_show(struct device *dev, struct device_attribute *attr,
  622. char *buf)
  623. {
  624. struct channel_subsystem *css = to_css(dev);
  625. int ret;
  626. if (!css)
  627. return 0;
  628. mutex_lock(&css->mutex);
  629. ret = sprintf(buf, "%x\n", css->cm_enabled);
  630. mutex_unlock(&css->mutex);
  631. return ret;
  632. }
  633. static ssize_t
  634. css_cm_enable_store(struct device *dev, struct device_attribute *attr,
  635. const char *buf, size_t count)
  636. {
  637. struct channel_subsystem *css = to_css(dev);
  638. int ret;
  639. unsigned long val;
  640. ret = strict_strtoul(buf, 16, &val);
  641. if (ret)
  642. return ret;
  643. mutex_lock(&css->mutex);
  644. switch (val) {
  645. case 0:
  646. ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
  647. break;
  648. case 1:
  649. ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
  650. break;
  651. default:
  652. ret = -EINVAL;
  653. }
  654. mutex_unlock(&css->mutex);
  655. return ret < 0 ? ret : count;
  656. }
  657. static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
  658. static int __init setup_css(int nr)
  659. {
  660. u32 tod_high;
  661. int ret;
  662. struct channel_subsystem *css;
  663. css = channel_subsystems[nr];
  664. memset(css, 0, sizeof(struct channel_subsystem));
  665. css->pseudo_subchannel =
  666. kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
  667. if (!css->pseudo_subchannel)
  668. return -ENOMEM;
  669. css->pseudo_subchannel->dev.parent = &css->device;
  670. css->pseudo_subchannel->dev.release = css_subchannel_release;
  671. sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
  672. ret = cio_create_sch_lock(css->pseudo_subchannel);
  673. if (ret) {
  674. kfree(css->pseudo_subchannel);
  675. return ret;
  676. }
  677. mutex_init(&css->mutex);
  678. css->valid = 1;
  679. css->cssid = nr;
  680. sprintf(css->device.bus_id, "css%x", nr);
  681. css->device.release = channel_subsystem_release;
  682. tod_high = (u32) (get_clock() >> 32);
  683. css_generate_pgid(css, tod_high);
  684. return 0;
  685. }
  686. static int css_reboot_event(struct notifier_block *this,
  687. unsigned long event,
  688. void *ptr)
  689. {
  690. int ret, i;
  691. ret = NOTIFY_DONE;
  692. for (i = 0; i <= __MAX_CSSID; i++) {
  693. struct channel_subsystem *css;
  694. css = channel_subsystems[i];
  695. mutex_lock(&css->mutex);
  696. if (css->cm_enabled)
  697. if (chsc_secm(css, 0))
  698. ret = NOTIFY_BAD;
  699. mutex_unlock(&css->mutex);
  700. }
  701. return ret;
  702. }
  703. static struct notifier_block css_reboot_notifier = {
  704. .notifier_call = css_reboot_event,
  705. };
  706. /*
  707. * Now that the driver core is running, we can setup our channel subsystem.
  708. * The struct subchannel's are created during probing (except for the
  709. * static console subchannel).
  710. */
  711. static int __init
  712. init_channel_subsystem (void)
  713. {
  714. int ret, i;
  715. ret = chsc_determine_css_characteristics();
  716. if (ret == -ENOMEM)
  717. goto out; /* No need to continue. */
  718. if (ret == 0)
  719. css_characteristics_avail = 1;
  720. ret = chsc_alloc_sei_area();
  721. if (ret)
  722. goto out;
  723. ret = slow_subchannel_init();
  724. if (ret)
  725. goto out;
  726. if ((ret = bus_register(&css_bus_type)))
  727. goto out;
  728. /* Try to enable MSS. */
  729. ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
  730. switch (ret) {
  731. case 0: /* Success. */
  732. max_ssid = __MAX_SSID;
  733. break;
  734. case -ENOMEM:
  735. goto out_bus;
  736. default:
  737. max_ssid = 0;
  738. }
  739. /* Setup css structure. */
  740. for (i = 0; i <= __MAX_CSSID; i++) {
  741. struct channel_subsystem *css;
  742. css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
  743. if (!css) {
  744. ret = -ENOMEM;
  745. goto out_unregister;
  746. }
  747. channel_subsystems[i] = css;
  748. ret = setup_css(i);
  749. if (ret)
  750. goto out_free;
  751. ret = device_register(&css->device);
  752. if (ret)
  753. goto out_free_all;
  754. if (css_characteristics_avail &&
  755. css_chsc_characteristics.secm) {
  756. ret = device_create_file(&css->device,
  757. &dev_attr_cm_enable);
  758. if (ret)
  759. goto out_device;
  760. }
  761. ret = device_register(&css->pseudo_subchannel->dev);
  762. if (ret)
  763. goto out_file;
  764. }
  765. ret = register_reboot_notifier(&css_reboot_notifier);
  766. if (ret)
  767. goto out_pseudo;
  768. css_init_done = 1;
  769. ctl_set_bit(6, 28);
  770. for_each_subchannel(__init_channel_subsystem, NULL);
  771. return 0;
  772. out_pseudo:
  773. device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev);
  774. out_file:
  775. device_remove_file(&channel_subsystems[i]->device,
  776. &dev_attr_cm_enable);
  777. out_device:
  778. device_unregister(&channel_subsystems[i]->device);
  779. out_free_all:
  780. kfree(channel_subsystems[i]->pseudo_subchannel->lock);
  781. kfree(channel_subsystems[i]->pseudo_subchannel);
  782. out_free:
  783. kfree(channel_subsystems[i]);
  784. out_unregister:
  785. while (i > 0) {
  786. struct channel_subsystem *css;
  787. i--;
  788. css = channel_subsystems[i];
  789. device_unregister(&css->pseudo_subchannel->dev);
  790. if (css_characteristics_avail && css_chsc_characteristics.secm)
  791. device_remove_file(&css->device,
  792. &dev_attr_cm_enable);
  793. device_unregister(&css->device);
  794. }
  795. out_bus:
  796. bus_unregister(&css_bus_type);
  797. out:
  798. chsc_free_sei_area();
  799. kfree(slow_subchannel_set);
  800. printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
  801. ret);
  802. return ret;
  803. }
  804. int sch_is_pseudo_sch(struct subchannel *sch)
  805. {
  806. return sch == to_css(sch->dev.parent)->pseudo_subchannel;
  807. }
  808. /*
  809. * find a driver for a subchannel. They identify by the subchannel
  810. * type with the exception that the console subchannel driver has its own
  811. * subchannel type although the device is an i/o subchannel
  812. */
  813. static int
  814. css_bus_match (struct device *dev, struct device_driver *drv)
  815. {
  816. struct subchannel *sch = to_subchannel(dev);
  817. struct css_driver *driver = to_cssdriver(drv);
  818. if (sch->st == driver->subchannel_type)
  819. return 1;
  820. return 0;
  821. }
  822. static int css_probe(struct device *dev)
  823. {
  824. struct subchannel *sch;
  825. int ret;
  826. sch = to_subchannel(dev);
  827. sch->driver = to_cssdriver(dev->driver);
  828. ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
  829. if (ret)
  830. sch->driver = NULL;
  831. return ret;
  832. }
  833. static int css_remove(struct device *dev)
  834. {
  835. struct subchannel *sch;
  836. int ret;
  837. sch = to_subchannel(dev);
  838. ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
  839. sch->driver = NULL;
  840. return ret;
  841. }
  842. static void css_shutdown(struct device *dev)
  843. {
  844. struct subchannel *sch;
  845. sch = to_subchannel(dev);
  846. if (sch->driver && sch->driver->shutdown)
  847. sch->driver->shutdown(sch);
  848. }
  849. static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
  850. {
  851. struct subchannel *sch = to_subchannel(dev);
  852. int ret;
  853. ret = add_uevent_var(env, "ST=%01X", sch->st);
  854. if (ret)
  855. return ret;
  856. ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
  857. return ret;
  858. }
  859. struct bus_type css_bus_type = {
  860. .name = "css",
  861. .match = css_bus_match,
  862. .probe = css_probe,
  863. .remove = css_remove,
  864. .shutdown = css_shutdown,
  865. .uevent = css_uevent,
  866. };
  867. /**
  868. * css_driver_register - register a css driver
  869. * @cdrv: css driver to register
  870. *
  871. * This is mainly a wrapper around driver_register that sets name
  872. * and bus_type in the embedded struct device_driver correctly.
  873. */
  874. int css_driver_register(struct css_driver *cdrv)
  875. {
  876. cdrv->drv.name = cdrv->name;
  877. cdrv->drv.bus = &css_bus_type;
  878. cdrv->drv.owner = cdrv->owner;
  879. return driver_register(&cdrv->drv);
  880. }
  881. EXPORT_SYMBOL_GPL(css_driver_register);
  882. /**
  883. * css_driver_unregister - unregister a css driver
  884. * @cdrv: css driver to unregister
  885. *
  886. * This is a wrapper around driver_unregister.
  887. */
  888. void css_driver_unregister(struct css_driver *cdrv)
  889. {
  890. driver_unregister(&cdrv->drv);
  891. }
  892. EXPORT_SYMBOL_GPL(css_driver_unregister);
  893. subsys_initcall(init_channel_subsystem);
  894. MODULE_LICENSE("GPL");
  895. EXPORT_SYMBOL(css_bus_type);
  896. EXPORT_SYMBOL_GPL(css_characteristics_avail);