/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
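
/*
 * Static page used as the buffer for store-event-information (SEI)
 * requests. Access is serialized through the machine check handler
 * thread (see chsc_process_crw() below), so no extra locking is
 * needed.
 */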
static void *sei_page;
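
/*
 * Map a CHSC response code to a Linux error value. Response code
 * 0x0001 indicates success, 0x0004 means the command is not
 * supported, and the remaining recognized codes signal an invalid
 * request.
 */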
static int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
                return -EINVAL;
        case 0x0004:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}
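
/*
 * Request/response block used by chsc_get_ssd_info() to store the
 * subchannel description (request code 0x0004). Unnamed bitfields
 * are reserved.
 */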
struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8 unit_addr;     /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));
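
/*
 * Fetch the subchannel description for @schid and fill in @ssd with
 * the path mask, CHPIDs and full link addresses. Returns 0 on
 * success, -ENOMEM/-ENODEV/-EBUSY, or a CHSC error translated by
 * chsc_error_from_response().
 */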
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}
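
/*
 * Return 1 if the subchannel has an I/O operation in flight and the
 * last path used mask (lpum) matches @mask, i.e. the pending
 * operation depends on the path in question; return 0 otherwise.
 */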
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
                return 1;
        return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0)
                        css_schedule_eval(sch->schid);
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(sch);
}
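
/*
 * Remove the channel path identified by @data (a struct chp_id) from
 * the subchannel's path masks. If the subchannel has become invalid
 * or the path was its only one, schedule it for evaluation;
 * otherwise terminate or retry pending I/O on that path and trigger
 * path verification.
 */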
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        int j;
        int mask;
        struct chp_id *chpid = data;
        struct schib schib;

        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);
        stsch(sch->schid, &schib);
        if (!css_sch_is_valid(&schib))
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if (check_for_io_on_path(sch, mask)) {
                if (device_is_online(sch))
                        device_kill_io(sch);
                else {
                        terminate_internal_io(sch);
                        /* Re-start path verification. */
                        if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                }
        } else {
                /* trigger path verification. */
                if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                else if (sch->lpm == mask)
                        goto out_unreg;
        }

        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};
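
/*
 * Return the path mask bit for the channel path in @data if the
 * subchannel description @ssd uses that CHPID and, where a full link
 * address is valid, the link address matches; return 0 if the
 * subchannel is not affected.
 */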
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
                              struct res_acc_data *data)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(ssd->path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
                        continue;
                if ((ssd->fla_valid_mask & mask) &&
                    ((ssd->fla[i] & data->fla_mask) != data->fla))
                        continue;
                return mask;
        }
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data = data;

        spin_lock_irq(sch->lock);
        chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
        if (chp_mask == 0)
                goto out;
        if (stsch(sch->schid, &sch->schib))
                goto out;
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);
out:
        spin_unlock_irq(sch->lock);
        return 0;
}
static void s390_process_res_acc(struct res_acc_data *res_data)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, res_data);
}
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
        return (u16) (lir->indesc[0] & 0x000000ff);
}
struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
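
/*
 * Handlers for the individual store-event-information content codes.
 * Each one checks the reporting source of the event and translates it
 * into the appropriate channel-path action.
 */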
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        s390_process_res_acc(&res_data);
}
struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}
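
/*
 * Called from the channel report word (CRW) handling: repeatedly
 * issue the store-event-information command (request code 0x000e)
 * and process each stored event until the response no longer flags
 * further pending event information (bit 0x80 in the flags field).
 */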
void chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;

        if (!sei_page)
                return;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}
static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __chp_add(struct subchannel *sch, void *data)
{
        int i, mask;
        struct chp_id *chpid = data;

        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id))
                        break;
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        if (stsch(sch->schid, &sch->schib)) {
                spin_unlock_irq(sch->lock);
                css_schedule_eval(sch->schid);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);

        spin_unlock_irq(sch->lock);
        return 0;
}

void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
                                           &chpid);
        }
}
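
/*
 * Apply a vary on/off event for @chpid to a single subchannel:
 * adjust the operational and logical path masks and, depending on
 * whether I/O is pending on the affected path, kill or retry the
 * operation and trigger path verification.
 */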
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        int mask;
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                mask = 0x80 >> chp;
                if (!(sch->ssd_info.path_mask & mask))
                        continue;
                if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                        continue;

                if (on) {
                        sch->opm |= mask;
                        sch->lpm |= mask;
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(sch);
                        break;
                }
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (check_for_io_on_path(sch, mask)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else {
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                                /* Re-start path verification. */
                                if (sch->driver && sch->driver->verify)
                                        sch->driver->verify(sch);
                        }
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0)
                                css_schedule_eval(sch->schid);
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(sch);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &chpid);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &chpid);
        return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}
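
/*
 * Issue the set-channel-monitor CHSC command (request code 0x0016).
 * @enable selects operation code 0 (enable) or 1 (disable); @page is
 * a zeroed 4K DMA page holding the request/response block, including
 * the addresses of the two channel-utilization buffers (cub_addr1/2).
 */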
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        free_page((unsigned long)secm_area);
        return ret;
}
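
/*
 * Fetch the channel-path description for @chpid via the
 * store-channel-path-description CHSC command (request code 0x0002)
 * and copy the result into @desc.
 */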
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;
        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}
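
/*
 * Read the channel-measurement characteristics for @chp (request
 * code 0x0022). On success, the cmg and shared attributes are stored
 * in the channel-path structure; if the response is marked not
 * valid, both are set to -1.
 */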
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;
        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else {
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}
int __init chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}
void __init chsc_free_sei_area(void)
{
        /* sei_page was allocated with get_zeroed_page(), so it must be
         * released with free_page(), not kfree(). */
        free_page((unsigned long)sei_page);
}
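
/*
 * Enable an optional channel-subsystem facility selected by
 * @operation_code by issuing the "sda" CHSC command (request code
 * 0x0031). A response code of 0x0101 means the facility is not
 * available and is mapped to -EOPNOTSUPP.
 */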
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        free_page((unsigned long)sda_area);
        return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }
        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);