css.c

/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
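
/*
 * Example (illustrative sketch, not part of the driver): a minimal
 * caller-supplied callback for for_each_subchannel(). The names count_fn
 * and count are hypothetical. Returning non-zero from the callback stops
 * the scan of the current subchannel set, and the callback's last return
 * value is handed back to the caller.
 *
 *	static int count_fn(struct subchannel_id schid, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	for_each_subchannel(count_fn, &count);
 */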

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }
        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                                                 void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;
        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);
        idset_fill(cb.set);
        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);
        return rc;
}
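
/*
 * Example (sketch, not part of the driver): walk all subchannels with
 * separate handlers for registered and not-yet-registered ones; compare
 * css_slow_path_func() below for the in-tree use. The handler names
 * seen_known and seen_unknown are hypothetical.
 *
 *	static int seen_known(struct subchannel *sch, void *data)
 *	{
 *		dev_dbg(&sch->dev, "registered subchannel\n");
 *		return 0;
 *	}
 *
 *	static int seen_unknown(struct subchannel_id schid, void *data)
 *	{
 *		CIO_MSG_EVENT(4, "unregistered sch 0.%x.%04x\n",
 *			      schid.ssid, schid.sch_no);
 *		return 0;
 *	}
 *
 *	for_each_subchannel_staged(seen_known, seen_unknown, NULL);
 */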

static void css_sch_todo(struct work_struct *work);

static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel (sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
        device_initialize(&sch->dev);
        return sch;
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_add(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        return PTR_ERR(sch);
        }
        ret = css_register_subchannel(sch);
        if (ret) {
                if (!cio_is_console(schid))
                        put_device(&sch->dev);
        }
        return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib)) {
                /* Subchannel is not provided. */
                return -ENXIO;
        }
        if (!css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
                      schid.sch_no);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
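
/*
 * Example (sketch, not part of the driver): per the kernel-doc above,
 * callers must hold the subchannel lock while scheduling a todo, exactly
 * as css_sch_todo() below does when it re-queues an -EAGAIN evaluation:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 */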

static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        atomic_set(&css_eval_scheduled, 0);
        init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        idset_sch_del(set, sch->schid);
        return 0;
}

static void css_schedule_eval_all_unreg(void)
{
        unsigned long flags;
        struct idset *unreg_set;

        /* Find unregistered subchannels. */
        unreg_set = idset_sch_new();
        if (!unreg_set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(unreg_set);
        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, unreg_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_work(cio_work_q, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_tod_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 0);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        ret = __chsc_do_secm(css, 1);
                        ret = notifier_from_errno(ret);
                        mutex_unlock(&css->mutex);
                }
                /* Search for subchannels that appeared during hibernation. */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }

        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing (except for
 * the static console subchannel).
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret) {
                        put_device(&css->pseudo_subchannel->dev);
                        goto out_file;
                }
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;
        int i;

        for (i = 0; i <= __MAX_CSSID; i++) {
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device, &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish. */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup when the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
        struct channel_path *chp;
        struct chp_id chpid;

        chsc_enable_facility(CHSC_SDA_OC_MSS);
        chp_id_for_each(&chpid) {
                chp = chpid_to_chp(chpid);
                if (chp)
                        chp_update_desc(chp);
        }
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
        .open = nonseekable_open,
        .write = cio_settle_write,
        .llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL,
                            &cio_settle_proc_fops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
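
/*
 * Usage sketch (userspace, illustrative): any write to /proc/cio_settle
 * blocks until pending channel report words have been handled and the
 * subchannel evaluation above has settled, e.g.:
 *
 *	echo 1 > /proc/cio_settle
 */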

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        css_update_ssd_info(sch);
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
        .name = "css",
        .match = css_bus_match,
        .probe = css_probe,
        .remove = css_remove,
        .shutdown = css_shutdown,
        .uevent = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets the bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
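
/*
 * Example (sketch under stated assumptions, not part of this file): a
 * minimal css driver registration. The names my_css_driver, my_probe and
 * my_subchannel_ids are hypothetical; the fields follow struct css_driver
 * and css_bus_match() above, which matches on css_device_id::type.
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static int my_probe(struct subchannel *sch)
 *	{
 *		return 0;
 *	}
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name = "my_css_drv",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_subchannel_ids,
 *		.probe = my_probe,
 *	};
 *
 * On module init, call css_driver_register(&my_css_driver); on module
 * exit, call css_driver_unregister(&my_css_driver) (see below).
 */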

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");