chsc.c

/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
        case 0x0104:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        case 0x000b:
                return -EBUSY;
        case 0x0100:
        case 0x0102:
                return -ENOMEM;
        default:
                return -EIO;
        }
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8 unit_addr;     /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
        ssd_area = chsc_page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;
        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out;
        }
        /* Copy data */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out:
        spin_unlock_irq(&chsc_page_lock);
        return ret;
}
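
/*
 * Notify the subchannel's driver that a channel path went away; if the
 * driver rejects the event, clear the logical path mask and schedule the
 * subchannel for re-evaluation on the slow path.
 */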
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
                        goto out_unreg;
        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, data, CHP_ONLINE);
        spin_unlock_irq(sch->lock);

        return 0;
}
static void s390_process_res_acc(struct chp_link *link)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
                link->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (link->fla != 0) {
                sprintf(dbf_txt, "fla%x", link->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, link);
}
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}
struct chsc_sei_nt0_area {
        u8  flags;
        u8  vf;                         /* validity flags */
        u8  rs;                         /* reporting source */
        u8  cc;                         /* content code */
        u16 fla;                        /* full link address */
        u16 rsid;                       /* reporting source id */
        u32 reserved1;
        u32 reserved2;
        /* ccdf has to be big enough for a link-incident record */
        u8  ccdf[PAGE_SIZE - 24 - 16];  /* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
        u8  flags;                      /* p and v bit */
        u8  reserved1;
        u8  reserved2;
        u8  cc;                         /* content code */
        u32 reserved3[13];
        u8  ccdf[PAGE_SIZE - 24 - 56];  /* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
        struct chsc_header request;
        u32 reserved1;
        u64 ntsm;                       /* notification type mask */
        struct chsc_header response;
        u32 :24;
        u8 nt;
        union {
                struct chsc_sei_nt0_area nt0_area;
                struct chsc_sei_nt2_area nt2_area;
                u8 nt_area[PAGE_SIZE - 24];
        } u;
} __packed;
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
        struct chp_link link;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                link.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        link.fla_mask = 0xffff;
                else
                        /* link address */
                        link.fla_mask = 0xff00;
        }
        s390_process_res_acc(&link);
}
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
        struct channel_path *chp;
        struct chp_id chpid;
        u8 *data;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
        if (sei_area->rs != 0)
                return;
        data = sei_area->ccdf;
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data, num))
                        continue;
                chpid.id = num;

                CIO_CRW_EVENT(4, "Update information for channel path "
                              "%x.%02x\n", chpid.cssid, chpid.id);
                chp = chpid_to_chp(chpid);
                if (!chp) {
                        chp_new(chpid);
                        continue;
                }
                mutex_lock(&chp->lock);
                chp_update_desc(chp);
                mutex_unlock(&chp->lock);
        }
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;
        char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                pr_notice("Processing %s for channel path %x.%02x\n",
                          events[data->op], chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
        int ret;

        CIO_CRW_EVENT(4, "chsc: scm change notification\n");
        if (sei_area->rs != 7)
                return;

        ret = scm_update_information();
        if (ret)
                CIO_CRW_EVENT(0, "chsc: updating change notification"
                              " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
        int ret;

        CIO_CRW_EVENT(4, "chsc: scm available information\n");
        if (sei_area->rs != 7)
                return;

        ret = scm_process_availability_information();
        if (ret)
                CIO_CRW_EVENT(0, "chsc: process availability information"
                              " failed (rc=%d).\n", ret);
}
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
        switch (sei_area->cc) {
        case 1:
                zpci_event_error(sei_area->ccdf);
                break;
        case 2:
                zpci_event_availability(sei_area->ccdf);
                break;
        default:
                CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
                              sei_area->cc);
                break;
        }
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
        /* which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 7: /* channel-path-availability information */
                chsc_process_sei_chp_avail(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        case 12: /* scm change notification */
                chsc_process_sei_scm_change(sei_area);
                break;
        case 14: /* scm available notification */
                chsc_process_sei_scm_avail(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
                              sei_area->cc);
                break;
        }

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
}
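
/*
 * Issue the store-event-information (sei) command repeatedly and dispatch
 * each stored event to the nt0/nt2 handlers until no further pending
 * information is indicated.
 */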
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
        do {
                memset(sei, 0, sizeof(*sei));
                sei->request.length = 0x0010;
                sei->request.code = 0x000e;
                sei->ntsm = ntsm;

                if (chsc(sei))
                        break;

                if (sei->response.code != 0x0001) {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei->response.code);
                        break;
                }

                CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
                switch (sei->nt) {
                case 0:
                        chsc_process_sei_nt0(&sei->u.nt0_area);
                        break;
                case 2:
                        chsc_process_sei_nt2(&sei->u.nt2_area);
                        break;
                default:
                        CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
                        break;
                }
        } while (sei->u.nt0_area.flags & 0x80);
}
/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct chsc_sei *sei = sei_page;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);

        CIO_TRACE_EVENT(2, "prcss");
        chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}
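
/*
 * A channel path has become available: notify the drivers of the affected
 * subchannels and schedule recognition for subchannels that may now be
 * reachable.
 */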
void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];
        struct chp_link link;

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                memset(&link, 0, sizeof(struct chp_link));
                link.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);
        }
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        unsigned long flags;
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        spin_lock_irqsave(sch->lock, flags);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, &link,
                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        struct channel_path *chp = chpid_to_chp(chpid);

        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        if (on) {
                /* Try to update the channel path description. */
                chp_update_desc(chp);
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &chpid);
        } else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &chpid);

        return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}
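
/*
 * Issue the secm chsc command (request code 0x0016) to enable or disable
 * channel measurement for the given channel subsystem.
 */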
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
        secm_area = chsc_page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY >> 4;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
out:
        spin_unlock_irq(&chsc_page_lock);
        return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        int ret;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                __chsc_do_secm(css, 0);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
                                     int c, int m, void *page)
{
        struct chsc_scpd *scpd_area;
        int ccode, ret;

        if ((rfmt == 1) && !css_general_characteristics.fcs)
                return -EINVAL;
        if ((rfmt == 2) && !css_general_characteristics.cib)
                return -EINVAL;

        memset(page, 0, PAGE_SIZE);
        scpd_area = page;
        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;
        scpd_area->cssid = chpid.cssid;
        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;
        scpd_area->m = m;
        scpd_area->c = c;
        scpd_area->fmt = fmt;
        scpd_area->rfmt = rfmt;

        ccode = chsc(scpd_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret)
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
        return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
                                          struct channel_path_desc *desc)
{
        struct chsc_response_struct *chsc_resp;
        struct chsc_scpd *scpd_area;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chsc_page_lock, flags);
        scpd_area = chsc_page;
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
        if (ret)
                goto out;
        chsc_resp = (void *)&scpd_area->response;
        memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
        spin_unlock_irqrestore(&chsc_page_lock, flags);
        return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
                                          struct channel_path_desc_fmt1 *desc)
{
        struct chsc_response_struct *chsc_resp;
        struct chsc_scpd *scpd_area;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chsc_page_lock, flags);
        scpd_area = chsc_page;
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
        if (ret)
                goto out;
        chsc_resp = (void *)&scpd_area->response;
        memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
        spin_unlock_irqrestore(&chsc_page_lock, flags);
        return ret;
}
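
/*
 * Copy only those measurement characteristics that the cmcv mask marks as
 * valid; clear the remaining values.
 */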
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        struct cmg_chars *cmg_chars;
        int i, mask;

        cmg_chars = chp->cmg_chars;
        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                mask = 0x80 >> (i + 3);
                if (cmcv & mask)
                        cmg_chars->values[i] = chars->values[i];
                else
                        cmg_chars->values[i] = 0;
        }
}
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        struct cmg_chars *cmg_chars;
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        chp->cmg_chars = NULL;
        cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
        if (!cmg_chars)
                return -ENOMEM;

        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
        scmc_area = chsc_page;
        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;
        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret) {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
                goto out;
        }
        if (scmc_area->not_valid) {
                chp->cmg = -1;
                chp->shared = -1;
                goto out;
        }
        chp->cmg = scmc_area->cmg;
        chp->shared = scmc_area->shared;
        if (chp->cmg != 2 && chp->cmg != 3) {
                /* No cmg-dependent data. */
                goto out;
        }
        chp->cmg_chars = cmg_chars;
        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                  (struct cmg_chars *) &scmc_area->data);
out:
        spin_unlock_irq(&chsc_page_lock);
        if (!chp->cmg_chars)
                kfree(cmg_chars);

        return ret;
}
int __init chsc_init(void)
{
        int ret;

        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page || !chsc_page) {
                ret = -ENOMEM;
                goto out_err;
        }
        ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
        if (ret)
                goto out_err;
        return ret;
out_err:
        free_page((unsigned long)chsc_page);
        free_page((unsigned long)sei_page);
        return ret;
}

void __init chsc_init_cleanup(void)
{
        crw_unregister_handler(CRW_RSC_CSS);
        free_page((unsigned long)chsc_page);
        free_page((unsigned long)sei_page);
}
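
/*
 * Enable the chsc sub-facility selected by @operation_code via the sda
 * command (request code 0x0031).
 */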
int chsc_enable_facility(int operation_code)
{
        unsigned long flags;
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        spin_lock_irqsave(&chsc_page_lock, flags);
        memset(chsc_page, 0, PAGE_SIZE);
        sda_area = chsc_page;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        spin_unlock_irqrestore(&chsc_page_lock, flags);
        return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[508];
        } __attribute__ ((packed)) *scsc_area;

        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
        scsc_area = chsc_page;
        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        spin_unlock_irq(&chsc_page_lock);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0;
                unsigned int op : 8;
                unsigned int rsvd1 : 8;
                unsigned int ctrl : 16;
                unsigned int rsvd2[5];
                struct chsc_header response;
                unsigned int rsvd3[7];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0020;
        rr->request.code = 0x0033;
        rr->op = op;
        rr->ctrl = ctrl;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        rc = (rr->response.code == 0x0001) ? 0 : -EIO;
        return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
        struct {
                struct chsc_header request;
                unsigned int rsvd0[3];
                struct chsc_header response;
                char data[size];
        } __attribute__ ((packed)) *rr;
        int rc;

        memset(page, 0, PAGE_SIZE);
        rr = page;
        rr->request.length = 0x0010;
        rr->request.code = 0x0038;
        rc = chsc(rr);
        if (rc)
                return -EIO;
        memcpy(result, &rr->data, size);
        return (rr->response.code == 0x0001) ? 0 : -EIO;
}
int chsc_siosl(struct subchannel_id schid)
{
        struct {
                struct chsc_header request;
                u32 word1;
                struct subchannel_id sid;
                u32 word3;
                struct chsc_header response;
                u32 word[11];
        } __attribute__ ((packed)) *siosl_area;
        unsigned long flags;
        int ccode;
        int rc;

        spin_lock_irqsave(&chsc_page_lock, flags);
        memset(chsc_page, 0, PAGE_SIZE);
        siosl_area = chsc_page;
        siosl_area->request.length = 0x0010;
        siosl_area->request.code = 0x0046;
        siosl_area->word1 = 0x80000000;
        siosl_area->sid = schid;

        ccode = chsc(siosl_area);
        if (ccode > 0) {
                if (ccode == 3)
                        rc = -ENODEV;
                else
                        rc = -EBUSY;
                CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
                              schid.ssid, schid.sch_no, ccode);
                goto out;
        }
        rc = chsc_error_from_response(siosl_area->response.code);
        if (rc)
                CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              siosl_area->response.code);
        else
                CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
                              schid.ssid, schid.sch_no);
out:
        spin_unlock_irqrestore(&chsc_page_lock, flags);
        return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
        int ccode, ret;

        memset(scm_area, 0, sizeof(*scm_area));
        scm_area->request.length = 0x0020;
        scm_area->request.code = 0x004C;

        scm_area->reqtok = token;

        ccode = chsc(scm_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        ret = chsc_error_from_response(scm_area->response.code);
        if (ret != 0)
                CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
                              scm_area->response.code);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);