/*
 * drivers/s390/cio/chsc.c
 *  S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
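
/*
 * chsc_get_sch_desc_irq - fetch the subchannel description for @sch
 *
 * Builds a store-subchannel-description (ssd) chsc request for the single
 * subchannel @sch on the scratch page @page, issues it and, on success,
 * records the subchannel type and the valid chpid/full-link-address pairs
 * in sch->ssd_info. The caller provides a zeroed page suitable for chsc
 * (GFP_DMA) and holds sch->lock (see css_get_ssd_info()).
 *
 * Returns 0 on success (including unknown subchannel types, which are
 * only logged), -ENODEV/-EBUSY for a non-zero condition code, and
 * -EINVAL/-EOPNOTSUPP/-EIO for error response codes.
 */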
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;
        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
                }
        }
        return 0;
}
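
/*
 * css_get_ssd_info - store subchannel description data for @sch
 *
 * Allocates a scratch page, fetches the ssd information under the
 * subchannel lock and, on success, creates channel-path structures for
 * all chpids in the subchannel's path installed mask that are not yet
 * registered. A failure of the chsc itself is reported once via printk.
 */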
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}
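
/*
 * s390_subchannel_remove_chpid - bus iterator callback for chpid removal
 * @dev: subchannel device
 * @data: pointer to the struct chp_id that went away
 *
 * If the subchannel uses the given chpid, refresh its schib and react:
 * terminate I/O that is active only on the dead path, trigger path
 * verification, or put the subchannel on the slow path for re-evaluation
 * when the device or the last path is gone. Always returns 0 so that
 * bus_for_each_dev() keeps iterating.
 */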
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}
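
/*
 * chsc_chp_offline - react to a channel path going offline
 * @chpid: the channel-path ID that is no longer available
 *
 * Walks all subchannels on the css bus, removes @chpid from their path
 * masks via s390_subchannel_remove_chpid() and kicks the slow-path work
 * queue if subchannels have to be re-evaluated. Does nothing if the
 * channel path is not known to be in a usable state.
 */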
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};
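
/*
 * s390_process_res_acc_sch - check whether @sch is affected by an event
 *
 * Matches the subchannel's ssd information against the chpid and the
 * (partial) full link address in @res_data. Returns the path mask bit
 * (0x80 >> chp) of the first matching path, or 0 if the subchannel is
 * not concerned or its schib could not be refreshed.
 */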
static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);
        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
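
/*
 * chsc_process_sei_link_incident - handle a link-incident event
 *
 * Extracts the chpid from the link incident record in the content-code
 * dependent field and takes the affected channel path offline. Events
 * with a reporting source other than 4 are ignored; an invalid LIR is
 * only logged.
 */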
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
        return 0;
}

static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);
        return rc;
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return 0;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
        return 0;
}

static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                rc = chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
        return rc;
}
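
/*
 * chsc_process_crw - process pending channel subsystem event information
 *
 * Repeatedly issues the store-event-information (sei) chsc and dispatches
 * each stored event to chsc_process_sei() for as long as the response
 * flags announce further pending event information. Returns the last
 * non-zero rc from event processing, or 0.
 */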
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);
        return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}
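
/*
 * chsc_chp_online - react to a channel path becoming available
 * @chpid: the channel-path ID that came online
 *
 * Scans all subchannels to add the new path to those already using the
 * chpid and to pick up subchannels that have only now become visible.
 * Returns -EAGAIN if subchannels were deferred to the slow path and a
 * rescan is needed, 0 otherwise.
 */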
int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}
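
/*
 * __s390_subchannel_vary_chpid - apply a vary event to one subchannel
 *
 * For vary on, the path is added to the subchannel's operational and
 * logical path masks and reprobing or path verification is triggered.
 * For vary off, the path is removed; I/O still active on that path is
 * killed or terminated, and a subchannel left without logical paths is
 * queued for re-evaluation. Subchannels without valid ssd info are
 * skipped.
 */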
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo path verification on the devices the chpid connects to.
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}
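
/*
 * __chsc_do_secm - issue the set-channel-monitor (secm) chsc
 * @css: channel subsystem to operate on
 * @enable: non-zero to enable, zero to disable channel measurement
 * @page: zeroed scratch page (GFP_DMA) for the request/response block
 *
 * Passes the addresses of the two channel-utilization blocks
 * (css->cub_addr1/2) along with the operation code and translates the
 * chsc response codes into errno values.
 */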
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}
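
/*
 * chsc_secm - enable or disable channel measurement for @css
 *
 * On enable, allocates the two channel-utilization blocks before issuing
 * the secm chsc; on success the cmg sysfs attributes are added (or
 * removed on disable). If adding the attributes fails, measurement is
 * switched off again and the utilization blocks are freed. Serialized
 * against concurrent callers by css->mutex.
 */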
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}
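
/*
 * chsc_determine_channel_path_description - get a channel-path descriptor
 * @chpid: channel-path ID to query
 * @desc: where to store the descriptor on success
 *
 * Issues a store-channel-path-description (scpd) chsc for the single
 * chpid and copies the returned descriptor to @desc.
 */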
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}
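
/*
 * chsc_get_channel_measurement_chars - read measurement characteristics
 * @chp: channel path to query
 *
 * Issues a store-channel-measurement-characteristics (scmc) chsc for the
 * chpid and stores cmg, the shared state and the cmg-dependent
 * characteristics in @chp. If the response marks the data as not valid,
 * cmg and shared are set to -1.
 */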
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}
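
/*
 * chsc_enable_facility - enable an optional channel subsystem facility
 * @operation_code: identifies the facility to enable (used, for
 *                  instance, to activate multiple subchannel sets)
 *
 * Issues chsc command 0x0031 with the given operation code and maps the
 * response codes to errno values.
 */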
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
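
/*
 * chsc_determine_css_characteristics - cache global css capabilities
 *
 * Issues a store-channel-subsystem-characteristics (scsc) chsc and
 * copies the general and chsc characteristics into the exported
 * css_general_characteristics and css_chsc_characteristics variables,
 * which other cio code checks for optional features.
 */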
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }

        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);