/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
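/*
 * chsc_get_ssd_info - store subchannel description data for one subchannel
 * @schid: id of the subchannel
 * @ssd: subchannel description data to be filled in
 *
 * Issues the store-subchannel-description chsc (command code 0x0004) for
 * @schid and copies the path mask, fla-valid mask, chpids and full link
 * addresses into @ssd. Returns 0 on success, negative errno otherwise.
 */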
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	if (ssd_area->response.code != 0x0001) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		ret = -EIO;
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != 0) && (ssd_area->st != 2))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}
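/*
 * Clear the internal I/O pending on @sch and request that it be retried.
 * If the clear itself fails, trigger verification or, failing that,
 * reschedule the subchannel for evaluation.
 */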
static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct chp_id *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
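/*
 * chsc_chp_offline - process a channel path that has become unavailable
 * @chpid: channel path which is no longer available
 *
 * Walks all subchannels on the css bus and removes the path from those
 * that use it (see s390_subchannel_remove_chpid above).
 */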
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out:
	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}
static void s390_process_res_acc(struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel(__s390_process_res_acc, res_data);
}
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
}
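/*
 * Layout of the store-event-information request/response block. The
 * content-code dependent field (ccdf) is sized so that the whole
 * structure fills exactly one 4K page.
 */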
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
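/*
 * chsc_process_crw - process pending channel subsystem event information
 *
 * Repeatedly issues store-event-information (command code 0x000e) into
 * sei_page and hands each stored event to chsc_process_sei(), until the
 * response no longer flags further pending events (bit 0x80).
 */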
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}
static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct chp_id *chpid;
	struct subchannel *sch;

	chpid = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}
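/*
 * chsc_chp_online - process a channel path that has become available
 * @chpid: channel path which is now available
 *
 * Scans all subchannels and adds the new path to those configured for
 * it (see __chp_add above).
 */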
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0)
		for_each_subchannel(__chp_add, &chpid);
}
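/*
 * Helper for the vary callbacks below: add (@on != 0) or remove (@on == 0)
 * @chpid from the operational and logical path masks of @sch and trigger
 * the appropriate reprobe, verification or I/O-termination action.
 */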
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(&sch->dev);
			}
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	return 0;
}
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
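/*
 * Issue a set-channel-monitor chsc (command code 0x0016) to switch
 * channel measurement for @css on or off, using the preallocated page
 * at @page as the request/response block.
 */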
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}
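/*
 * chsc_secm - switch channel measurement for a channel subsystem on or off
 *
 * Allocates the channel-utilization blocks when enabling, performs the
 * secm operation and adds or removes the measurement-related attributes.
 * If adding the attributes fails, measurement is switched off again.
 */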
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}
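/*
 * Retrieve the channel path description for @chpid via the scpd chsc
 * (command code 0x0002) and copy it to @desc.
 */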
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
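/*
 * Retrieve the channel measurement characteristics for @chp via the
 * scmc chsc (command code 0x0022) and cache the cmg-dependent values
 * through chsc_initialize_cmg_chars().
 */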
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
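/*
 * chsc_enable_facility - enable a chsc facility during initialization
 * @operation_code: facility to be enabled
 *
 * Issues the sda chsc (command code 0x0031) with the given operation
 * code and maps the response code to an errno.
 */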
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}
subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
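/*
 * Determine the general and chsc characteristics supported by the
 * channel subsystem (command code 0x0010) and cache them in the
 * exported css_general_characteristics and css_chsc_characteristics.
 */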
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);