chsc.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219
  1. /*
  2. * drivers/s390/cio/chsc.c
  3. * S/390 common I/O routines -- channel subsystem call
  4. *
  5. * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
  6. * IBM Corporation
  7. * Author(s): Ingo Adlung (adlung@de.ibm.com)
  8. * Cornelia Huck (cornelia.huck@de.ibm.com)
  9. * Arnd Bergmann (arndb@de.ibm.com)
  10. */
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/init.h>
  14. #include <linux/device.h>
  15. #include <asm/cio.h>
  16. #include "css.h"
  17. #include "cio.h"
  18. #include "cio_debug.h"
  19. #include "ioasm.h"
  20. #include "chpid.h"
  21. #include "chp.h"
  22. #include "chsc.h"
  23. static void *sei_page;
  24. /* FIXME: this is _always_ called for every subchannel. shouldn't we
  25. * process more than one at a time? */
/*
 * Issue the "store subchannel description" (ssd) chsc command for a single
 * subchannel and, on success, record the subchannel type and per-path
 * chpid/full-link-address data in sch->ssd_info.
 *
 * @page must be a zeroed, DMA-capable scratch page; the caller (see
 * css_get_ssd_info()) holds sch->lock.
 *
 * Returns 0 on success (including unknown subchannel types, which are
 * logged and skipped), -ENODEV/-EBUSY for chsc condition codes, and
 * -EINVAL/-EOPNOTSUPP/-EIO for error response codes.
 */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;
	/* Layout of the ssd request/response block (hardware-defined). */
	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} __attribute__ ((packed)) *ssd_area;

	ssd_area = page;

	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;

	/* Request a single-subchannel range: first == last == this schid. */
	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		/* Condition code 3 means the facility is not available. */
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	/* Only I/O (0) and message (2) subchannels carry path data. */
	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			/* Copy a path only if it is installed and its
			 * full link address is marked valid. */
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
  124. int
  125. css_get_ssd_info(struct subchannel *sch)
  126. {
  127. int ret;
  128. void *page;
  129. page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
  130. if (!page)
  131. return -ENOMEM;
  132. spin_lock_irq(sch->lock);
  133. ret = chsc_get_sch_desc_irq(sch, page);
  134. if (ret) {
  135. static int cio_chsc_err_msg;
  136. if (!cio_chsc_err_msg) {
  137. printk(KERN_ERR
  138. "chsc_get_sch_descriptions:"
  139. " Error %d while doing chsc; "
  140. "processing some machine checks may "
  141. "not work\n", ret);
  142. cio_chsc_err_msg = 1;
  143. }
  144. }
  145. spin_unlock_irq(sch->lock);
  146. free_page((unsigned long)page);
  147. if (!ret) {
  148. int j, mask;
  149. struct chp_id chpid;
  150. chp_id_init(&chpid);
  151. /* Allocate channel path structures, if needed. */
  152. for (j = 0; j < 8; j++) {
  153. mask = 0x80 >> j;
  154. chpid.id = sch->ssd_info.chpid[j];
  155. if ((sch->schib.pmcw.pim & mask) &&
  156. !chp_is_registered(chpid))
  157. chp_new(chpid);
  158. }
  159. }
  160. return ret;
  161. }
/*
 * Bus iterator callback: take channel path *data (a struct chp_id) away
 * from the subchannel behind @dev.
 *
 * If the subchannel does not use the chpid, nothing happens.  If the
 * device vanished, the chpid was its only installed path, or its last
 * usable path, the subchannel is queued for slow-path re-evaluation.
 * Always returns 0 so that bus_for_each_dev() keeps iterating.
 */
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct chp_id *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	/* Locate the affected chpid among the installed paths. */
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		/* This subchannel does not use the chpid at all. */
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		/* Device number no longer valid: unregister. */
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	/* If I/O is active on exactly the lost path, clear the subchannel
	 * and have the driver's termination handler retry later. */
	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Request retry of internal operation. */
		device_set_intretry(sch);
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	else if (sch->lpm == mask)
		/* Lost the last logically usable path. */
		goto out_unreg;
out_unlock:
	spin_unlock_irq(sch->lock);
	return 0;
out_unreg:
	spin_unlock_irq(sch->lock);
	sch->lpm = 0;
	/* Queue for slow-path re-evaluation; on failure force a rescan. */
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
  219. void chsc_chp_offline(struct chp_id chpid)
  220. {
  221. char dbf_txt[15];
  222. sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
  223. CIO_TRACE_EVENT(2, dbf_txt);
  224. if (chp_get_status(chpid) <= 0)
  225. return;
  226. bus_for_each_dev(&css_bus_type, NULL, &chpid,
  227. s390_subchannel_remove_chpid);
  228. if (need_rescan || css_slow_subchannels_exist())
  229. queue_work(slow_path_wq, &slow_path_work);
  230. }
/* Parameters of a resource-accessibility event: the affected channel
 * path plus an optional (masked) full link address. */
struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;	/* 0xffff: full link address valid,
			 * 0xff00: only the link address part valid,
			 * 0: no fla information (see
			 * chsc_process_sei_res_acc()) */
	u16 fla;	/* full link address */
};
  236. static int s390_process_res_acc_sch(struct res_acc_data *res_data,
  237. struct subchannel *sch)
  238. {
  239. int found;
  240. int chp;
  241. int ccode;
  242. found = 0;
  243. for (chp = 0; chp <= 7; chp++)
  244. /*
  245. * check if chpid is in information updated by ssd
  246. */
  247. if (sch->ssd_info.valid &&
  248. sch->ssd_info.chpid[chp] == res_data->chpid.id &&
  249. (sch->ssd_info.fla[chp] & res_data->fla_mask)
  250. == res_data->fla) {
  251. found = 1;
  252. break;
  253. }
  254. if (found == 0)
  255. return 0;
  256. /*
  257. * Do a stsch to update our subchannel structure with the
  258. * new path information and eventually check for logically
  259. * offline chpids.
  260. */
  261. ccode = stsch(sch->schid, &sch->schib);
  262. if (ccode > 0)
  263. return 0;
  264. return 0x80 >> chp;
  265. }
  266. static int
  267. s390_process_res_acc_new_sch(struct subchannel_id schid)
  268. {
  269. struct schib schib;
  270. int ret;
  271. /*
  272. * We don't know the device yet, but since a path
  273. * may be available now to the device we'll have
  274. * to do recognition again.
  275. * Since we don't have any idea about which chpid
  276. * that beast may be on we'll have to do a stsch
  277. * on all devices, grr...
  278. */
  279. if (stsch_err(schid, &schib))
  280. /* We're through */
  281. return need_rescan ? -EAGAIN : -ENXIO;
  282. /* Put it on the slow path. */
  283. ret = css_enqueue_subchannel_slow(schid);
  284. if (ret) {
  285. css_clear_subchannel_slow_list();
  286. need_rescan = 1;
  287. return -EAGAIN;
  288. }
  289. return 0;
  290. }
/*
 * for_each_subchannel() callback for resource-accessibility processing;
 * @data points to the struct res_acc_data describing the event.
 *
 * Known subchannels get the recovered path added back to sch->lpm and a
 * reprobe or path verification triggered; unknown schids are handed to
 * s390_process_res_acc_new_sch().  Returns 0 to continue the iteration.
 */
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		/* Event does not concern this subchannel. */
		spin_unlock_irq(sch->lock);
		put_device(&sch->dev);
		return 0;
	}
	old_lpm = sch->lpm;
	/* Recompute the logical path mask, including the recovered path. */
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		/* There was no usable path before: device may appear now. */
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}
  322. static int
  323. s390_process_res_acc (struct res_acc_data *res_data)
  324. {
  325. int rc;
  326. char dbf_txt[15];
  327. sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
  328. res_data->chpid.id);
  329. CIO_TRACE_EVENT( 2, dbf_txt);
  330. if (res_data->fla != 0) {
  331. sprintf(dbf_txt, "fla%x", res_data->fla);
  332. CIO_TRACE_EVENT( 2, dbf_txt);
  333. }
  334. /*
  335. * I/O resources may have become accessible.
  336. * Scan through all subchannels that may be concerned and
  337. * do a validation on those.
  338. * The more information we have (info), the less scanning
  339. * will we have to do.
  340. */
  341. rc = for_each_subchannel(__s390_process_res_acc, res_data);
  342. if (css_slow_subchannels_exist())
  343. rc = -EAGAIN;
  344. else if (rc != -EAGAIN)
  345. rc = 0;
  346. return rc;
  347. }
  348. static int
  349. __get_chpid_from_lir(void *data)
  350. {
  351. struct lir {
  352. u8 iq;
  353. u8 ic;
  354. u16 sci;
  355. /* incident-node descriptor */
  356. u32 indesc[28];
  357. /* attached-node descriptor */
  358. u32 andesc[28];
  359. /* incident-specific information */
  360. u32 isinfo[28];
  361. } __attribute__ ((packed)) *lir;
  362. lir = data;
  363. if (!(lir->iq&0x80))
  364. /* NULL link incident record */
  365. return -EINVAL;
  366. if (!(lir->indesc[0]&0xc0000000))
  367. /* node descriptor not valid */
  368. return -EINVAL;
  369. if (!(lir->indesc[0]&0x10000000))
  370. /* don't handle device-type nodes - FIXME */
  371. return -EINVAL;
  372. /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
  373. return (u16) (lir->indesc[0]&0x000000ff);
  374. }
/* Request/response block of the "store event information" (sei) chsc
 * command; the single static instance lives in sei_page. */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8 flags;	/* 0x80: more events pending, 0x40: overflow
			 * (see chsc_process_crw()/chsc_process_sei()) */
	u8 vf;		/* validity flags */
	u8 rs;		/* reporting source */
	u8 cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
  393. static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
  394. {
  395. struct chp_id chpid;
  396. int id;
  397. CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
  398. sei_area->rs, sei_area->rsid);
  399. if (sei_area->rs != 4)
  400. return 0;
  401. id = __get_chpid_from_lir(sei_area->ccdf);
  402. if (id < 0)
  403. CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
  404. else {
  405. chp_id_init(&chpid);
  406. chpid.id = id;
  407. chsc_chp_offline(chpid);
  408. }
  409. return 0;
  410. }
  411. static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
  412. {
  413. struct res_acc_data res_data;
  414. struct chp_id chpid;
  415. int status;
  416. int rc;
  417. CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
  418. "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
  419. if (sei_area->rs != 4)
  420. return 0;
  421. chp_id_init(&chpid);
  422. chpid.id = sei_area->rsid;
  423. /* allocate a new channel path structure, if needed */
  424. status = chp_get_status(chpid);
  425. if (status < 0)
  426. chp_new(chpid);
  427. else if (!status)
  428. return 0;
  429. memset(&res_data, 0, sizeof(struct res_acc_data));
  430. res_data.chpid = chpid;
  431. if ((sei_area->vf & 0xc0) != 0) {
  432. res_data.fla = sei_area->fla;
  433. if ((sei_area->vf & 0xc0) == 0xc0)
  434. /* full link address */
  435. res_data.fla_mask = 0xffff;
  436. else
  437. /* link address */
  438. res_data.fla_mask = 0xff00;
  439. }
  440. rc = s390_process_res_acc(&res_data);
  441. return rc;
  442. }
  443. static int chsc_process_sei(struct chsc_sei_area *sei_area)
  444. {
  445. int rc;
  446. /* Check if we might have lost some information. */
  447. if (sei_area->flags & 0x40)
  448. CIO_CRW_EVENT(2, "chsc: event overflow\n");
  449. /* which kind of information was stored? */
  450. rc = 0;
  451. switch (sei_area->cc) {
  452. case 1: /* link incident*/
  453. rc = chsc_process_sei_link_incident(sei_area);
  454. break;
  455. case 2: /* i/o resource accessibiliy */
  456. rc = chsc_process_sei_res_acc(sei_area);
  457. break;
  458. default: /* other stuff */
  459. CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
  460. sei_area->cc);
  461. break;
  462. }
  463. return rc;
  464. }
/*
 * Handle a channel-report word for the channel subsystem: repeatedly
 * issue the "store event information" chsc command on the statically
 * allocated sei_page and process each stored event until the hardware
 * indicates no further events are pending (flags bit 0x80 cleared).
 *
 * Returns the last non-zero result from chsc_process_sei(), else 0.
 */
int chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;
	int ret;
	int rc;

	if (!sei_page)
		return 0;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			/* Non-zero condition code: stop retrieving. */
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			rc = chsc_process_sei(sei_area);
			if (rc)
				ret = rc;
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			ret = 0;
			break;
		}
	} while (sei_area->flags & 0x80); /* more events pending */
	return ret;
}
  497. static int
  498. __chp_add_new_sch(struct subchannel_id schid)
  499. {
  500. struct schib schib;
  501. int ret;
  502. if (stsch_err(schid, &schib))
  503. /* We're through */
  504. return need_rescan ? -EAGAIN : -ENXIO;
  505. /* Put it on the slow path. */
  506. ret = css_enqueue_subchannel_slow(schid);
  507. if (ret) {
  508. css_clear_subchannel_slow_list();
  509. need_rescan = 1;
  510. return -EAGAIN;
  511. }
  512. return 0;
  513. }
  514. static int
  515. __chp_add(struct subchannel_id schid, void *data)
  516. {
  517. int i, mask;
  518. struct chp_id *chpid;
  519. struct subchannel *sch;
  520. chpid = data;
  521. sch = get_subchannel_by_schid(schid);
  522. if (!sch)
  523. /* Check if the subchannel is now available. */
  524. return __chp_add_new_sch(schid);
  525. spin_lock_irq(sch->lock);
  526. for (i=0; i<8; i++) {
  527. mask = 0x80 >> i;
  528. if ((sch->schib.pmcw.pim & mask) &&
  529. (sch->schib.pmcw.chpid[i] == chpid->id)) {
  530. if (stsch(sch->schid, &sch->schib) != 0) {
  531. /* Endgame. */
  532. spin_unlock_irq(sch->lock);
  533. return -ENXIO;
  534. }
  535. break;
  536. }
  537. }
  538. if (i==8) {
  539. spin_unlock_irq(sch->lock);
  540. return 0;
  541. }
  542. sch->lpm = ((sch->schib.pmcw.pim &
  543. sch->schib.pmcw.pam &
  544. sch->schib.pmcw.pom)
  545. | mask) & sch->opm;
  546. if (sch->driver && sch->driver->verify)
  547. sch->driver->verify(&sch->dev);
  548. spin_unlock_irq(sch->lock);
  549. put_device(&sch->dev);
  550. return 0;
  551. }
  552. int chsc_chp_online(struct chp_id chpid)
  553. {
  554. int rc;
  555. char dbf_txt[15];
  556. sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
  557. CIO_TRACE_EVENT(2, dbf_txt);
  558. if (chp_get_status(chpid) == 0)
  559. return 0;
  560. rc = for_each_subchannel(__chp_add, &chpid);
  561. if (css_slow_subchannels_exist())
  562. rc = -EAGAIN;
  563. if (rc != -EAGAIN)
  564. rc = 0;
  565. return rc;
  566. }
  567. static int check_for_io_on_path(struct subchannel *sch, int index)
  568. {
  569. int cc;
  570. cc = stsch(sch->schid, &sch->schib);
  571. if (cc)
  572. return 0;
  573. if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
  574. return 1;
  575. return 0;
  576. }
  577. static void terminate_internal_io(struct subchannel *sch)
  578. {
  579. if (cio_clear(sch)) {
  580. /* Recheck device in case clear failed. */
  581. sch->lpm = 0;
  582. if (device_trigger_verify(sch) != 0) {
  583. if(css_enqueue_subchannel_slow(sch->schid)) {
  584. css_clear_subchannel_slow_list();
  585. need_rescan = 1;
  586. }
  587. }
  588. return;
  589. }
  590. /* Request retry of internal operation. */
  591. device_set_intretry(sch);
  592. /* Call handler. */
  593. if (sch->driver && sch->driver->termination)
  594. sch->driver->termination(&sch->dev);
  595. }
/*
 * Vary channel path @chpid logically on (@on != 0) or off (@on == 0) for
 * one subchannel, adjusting sch->opm/sch->lpm and notifying the driver.
 *
 * When varying off, I/O running on the path is killed (online devices)
 * or terminated and retried (internal I/O); a subchannel left with no
 * usable path is queued for slow-path re-evaluation.
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	/* Without ssd data we cannot map the chpid to a path position. */
	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid.id)
			continue;

		if (on) {
			/* Add the path to both operational and logical masks. */
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				/* First usable path: device may appear now. */
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		/* Vary off: remove the path from both masks. */
		sch->opm &= ~(0x80 >> chp);
		sch->lpm &= ~(0x80 >> chp);
		if (check_for_io_on_path(sch, chp)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
		} else if (!sch->lpm) {
			/* No usable path left; re-verify or requeue. */
			if (device_trigger_verify(sch) != 0) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			}
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}
  639. static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
  640. {
  641. struct subchannel *sch;
  642. struct chp_id *chpid;
  643. sch = to_subchannel(dev);
  644. chpid = data;
  645. __s390_subchannel_vary_chpid(sch, *chpid, 0);
  646. return 0;
  647. }
  648. static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
  649. {
  650. struct subchannel *sch;
  651. struct chp_id *chpid;
  652. sch = to_subchannel(dev);
  653. chpid = data;
  654. __s390_subchannel_vary_chpid(sch, *chpid, 1);
  655. return 0;
  656. }
  657. static int
  658. __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  659. {
  660. struct schib schib;
  661. struct subchannel *sch;
  662. sch = get_subchannel_by_schid(schid);
  663. if (sch) {
  664. put_device(&sch->dev);
  665. return 0;
  666. }
  667. if (stsch_err(schid, &schib))
  668. /* We're through */
  669. return -ENXIO;
  670. /* Put it on the slow path. */
  671. if (css_enqueue_subchannel_slow(schid)) {
  672. css_clear_subchannel_slow_list();
  673. need_rescan = 1;
  674. return -EAGAIN;
  675. }
  676. return 0;
  677. }
  678. /**
  679. * chsc_chp_vary - propagate channel-path vary operation to subchannels
  680. * @chpid: channl-path ID
  681. * @on: non-zero for vary online, zero for vary offline
  682. */
  683. int chsc_chp_vary(struct chp_id chpid, int on)
  684. {
  685. /*
  686. * Redo PathVerification on the devices the chpid connects to
  687. */
  688. bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
  689. s390_subchannel_vary_chpid_on :
  690. s390_subchannel_vary_chpid_off);
  691. if (on)
  692. /* Scan for new devices on varied on path. */
  693. for_each_subchannel(__s390_vary_chpid_on, NULL);
  694. if (need_rescan || css_slow_subchannels_exist())
  695. queue_work(slow_path_wq, &slow_path_work);
  696. return 0;
  697. }
  698. static void
  699. chsc_remove_cmg_attr(struct channel_subsystem *css)
  700. {
  701. int i;
  702. for (i = 0; i <= __MAX_CHPID; i++) {
  703. if (!css->chps[i])
  704. continue;
  705. chp_remove_cmg_attr(css->chps[i]);
  706. }
  707. }
  708. static int
  709. chsc_add_cmg_attr(struct channel_subsystem *css)
  710. {
  711. int i, ret;
  712. ret = 0;
  713. for (i = 0; i <= __MAX_CHPID; i++) {
  714. if (!css->chps[i])
  715. continue;
  716. ret = chp_add_cmg_attr(css->chps[i]);
  717. if (ret)
  718. goto cleanup;
  719. }
  720. return ret;
  721. cleanup:
  722. for (--i; i >= 0; i--) {
  723. if (!css->chps[i])
  724. continue;
  725. chp_remove_cmg_attr(css->chps[i]);
  726. }
  727. return ret;
  728. }
/*
 * Issue the "set channel-subsystem characteristics" chsc command to
 * start (@enable != 0) or stop (@enable == 0) channel measurement.
 * @page is a DMA-capable scratch page for the request/response block;
 * the channel-utilization block addresses come from @css.
 *
 * Returns 0 on success, -ENODEV/-EBUSY for chsc condition codes,
 * -EINVAL/-EOPNOTSUPP/-EIO for error response codes.
 */
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	/* Layout of the secm request/response block (hardware-defined). */
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	/* Addresses of the two channel-utilization blocks. */
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	/* Operation code 0 starts, 1 stops channel measurement. */
	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub adresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}
/*
 * Enable (@enable != 0) or disable channel measurement for @css.
 *
 * On enable, the two channel-utilization block pages are allocated
 * first; after a successful secm, the measurement sysfs attributes are
 * added (enable) or removed (disable).  If attribute creation fails,
 * measurement is switched off again.  Serialized via css->mutex.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from __chsc_do_secm()/chsc_add_cmg_attr().
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	/* Scratch page for the secm request/response block. */
	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute setup failed: switch
				 * measurement off again. */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	/* Enabling did not stick: release the cub pages again. */
	if (enable && !css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}
/*
 * Fetch the description of channel path @chpid via the "store
 * channel-path description" (scpd) chsc command and copy it to *desc.
 *
 * Returns 0 on success, -ENOMEM if no scratch page is available,
 * -ENODEV/-EBUSY for chsc condition codes, -EINVAL/-EOPNOTSUPP/-EIO
 * for error response codes.
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;
	/* Layout of the scpd request/response block (hardware-defined). */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Single-chpid range: first == last. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
  886. static void
  887. chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
  888. struct cmg_chars *chars)
  889. {
  890. switch (chp->cmg) {
  891. case 2:
  892. case 3:
  893. chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
  894. GFP_KERNEL);
  895. if (chp->cmg_chars) {
  896. int i, mask;
  897. struct cmg_chars *cmg_chars;
  898. cmg_chars = chp->cmg_chars;
  899. for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
  900. mask = 0x80 >> (i + 3);
  901. if (cmcv & mask)
  902. cmg_chars->values[i] = chars->values[i];
  903. else
  904. cmg_chars->values[i] = 0;
  905. }
  906. }
  907. break;
  908. default:
  909. /* No cmg-dependent data. */
  910. break;
  911. }
  912. }
/*
 * Query the measurement characteristics of channel path @chp via the
 * "store channel-measurement characteristics" (scmc) chsc command and
 * store cmg, shared state and the cmg-dependent characteristics in
 * *chp.  If the response is flagged not valid, cmg and shared are set
 * to -1.
 *
 * Returns 0 on success, -ENOMEM if no scratch page is available,
 * -ENODEV/-EBUSY for chsc condition codes, -EINVAL/-EOPNOTSUPP/-EIO
 * for error response codes.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;
	/* Layout of the scmc request/response block (hardware-defined). */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Single-chpid range: first == last. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			/* No valid measurement data for this chpid. */
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
  981. static int __init
  982. chsc_alloc_sei_area(void)
  983. {
  984. sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
  985. if (!sei_page)
  986. printk(KERN_WARNING"Can't allocate page for processing of " \
  987. "chsc machine checks!\n");
  988. return (sei_page ? 0 : -ENOMEM);
  989. }
/*
 * Enable a CHSC facility by issuing command 0x0031 ("sda") with the
 * given operation code.
 * @operation_code: facility-specific operation code passed through to
 *                  the hardware (semantics defined by the caller;
 *                  NOTE(review): presumably per the CHSC architecture —
 *                  not visible from this file).
 *
 * Returns 0 on success, -ENOMEM if no request page could be allocated,
 * -ENODEV/-EBUSY for chsc condition codes 3 / 1-2, -EINVAL for a
 * rejected request block, -EOPNOTSUPP when the command or facility is
 * not provided, or -EIO for any other response code.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;

	/* sda request/response block; layout must match the hardware
	 * command format exactly — do not reorder. */
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	/* ret first holds the chsc condition code, then the errno. */
	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007:
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}
/* Allocate the sei page early in boot, before machine checks arrive. */
subsys_initcall(chsc_alloc_sei_area);

/* Cached channel-subsystem characteristics, filled in by
 * chsc_determine_css_characteristics(). */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
  1041. int __init
  1042. chsc_determine_css_characteristics(void)
  1043. {
  1044. int result;
  1045. struct {
  1046. struct chsc_header request;
  1047. u32 reserved1;
  1048. u32 reserved2;
  1049. u32 reserved3;
  1050. struct chsc_header response;
  1051. u32 reserved4;
  1052. u32 general_char[510];
  1053. u32 chsc_char[518];
  1054. } __attribute__ ((packed)) *scsc_area;
  1055. scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
  1056. if (!scsc_area) {
  1057. printk(KERN_WARNING"cio: Was not able to determine available" \
  1058. "CHSCs due to no memory.\n");
  1059. return -ENOMEM;
  1060. }
  1061. scsc_area->request.length = 0x0010;
  1062. scsc_area->request.code = 0x0010;
  1063. result = chsc(scsc_area);
  1064. if (result) {
  1065. printk(KERN_WARNING"cio: Was not able to determine " \
  1066. "available CHSCs, cc=%i.\n", result);
  1067. result = -EIO;
  1068. goto exit;
  1069. }
  1070. if (scsc_area->response.code != 1) {
  1071. printk(KERN_WARNING"cio: Was not able to determine " \
  1072. "available CHSCs.\n");
  1073. result = -EIO;
  1074. goto exit;
  1075. }
  1076. memcpy(&css_general_characteristics, scsc_area->general_char,
  1077. sizeof(css_general_characteristics));
  1078. memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
  1079. sizeof(css_chsc_characteristics));
  1080. exit:
  1081. free_page ((unsigned long) scsc_area);
  1082. return result;
  1083. }
/* Export the cached characteristics for use by other (GPL) modules. */
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);