chsc.c

/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
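
/*
 * Map a CHSC response code to a Linux error value.  0x0001 is the
 * success code; the other codes listed below indicate an invalid or
 * unsupported request, and anything else is treated as an I/O error.
 */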
static int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
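
/*
 * Request/response block for the CHSC command used by chsc_get_ssd_info()
 * (request code 0x0004; "ssd" presumably stands for subchannel description
 * data).  The layout mirrors the hardware format; unnamed bit fields are
 * reserved.
 */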
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
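
/*
 * chsc_get_ssd_info() - fetch path information for one subchannel.
 * A DMA-capable page is allocated for the CHSC block, the request is
 * issued for a single subchannel (f_sch == l_sch) and, on success, the
 * path mask, CHPIDs and full link addresses are copied into @ssd.
 * Returns 0 on success, -ENOMEM, -ENODEV, -EBUSY or a value derived
 * from the CHSC response code.
 */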
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(sch);
}
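
/*
 * Called for each subchannel when a channel path goes offline: if the
 * subchannel uses the affected CHPID, I/O running on that path is killed
 * (or the internal request terminated) and path verification is
 * restarted; subchannels left without a usable path are scheduled for
 * re-evaluation.
 */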
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	int j;
	int mask;
	struct chp_id *chpid = data;
	struct schib schib;

	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!css_sch_is_valid(&schib))
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
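
/*
 * chsc_chp_offline() - handle the disappearance of a channel path.
 * The subchannel walk is skipped when chp_get_status() reports the
 * path as not in use; otherwise every subchannel using the CHPID is
 * processed by s390_subchannel_remove_chpid().
 */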
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data = data;

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);
out:
	spin_unlock_irq(sch->lock);
	return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}
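
/*
 * Layout of the store-event-information block ("sei", request code
 * 0x000e).  The content-code dependent field is sized so that the whole
 * structure fills one 4K page and can hold a link-incident record.
 */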
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
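
/*
 * chsc_process_crw() - process a channel report word that points to the
 * channel subsystem.  Store-event-information requests are issued in a
 * loop until the response no longer has the "more events pending" flag
 * (0x80) set or an error occurs.
 */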
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __chp_add(struct subchannel *sch, void *data)
{
	int i, mask;
	struct chp_id *chpid = data;

	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id))
			break;
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	if (stsch(sch->schid, &sch->schib)) {
		spin_unlock_irq(sch->lock);
		css_schedule_eval(sch->schid);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);

	spin_unlock_irq(sch->lock);

	return 0;
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0)
		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
					   &chpid);
}
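
/*
 * Adjust a subchannel's operational and logical path masks when a CHPID
 * is varied on- or offline.  Varying online may trigger a reprobe (if no
 * path was usable before) or path verification; varying offline kills or
 * terminates I/O running on the affected path and re-verifies the
 * remaining paths.
 */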
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(sch);
			}
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
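
/*
 * Issue the CHSC command with request code 0x0016 ("secm", presumably
 * set channel monitor) to enable or disable channel measurement for a
 * channel subsystem.  The pages allocated by chsc_secm() are passed to
 * the hardware through the cub_addr1/cub_addr2 fields.
 */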
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}
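
/*
 * Fetch the description of a single channel path via the CHSC command
 * with request code 0x0002 ("scpd", store channel-path description) and
 * copy it into @desc.
 */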
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
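
/*
 * Read the measurement characteristics of a channel path (request code
 * 0x0022, "scmc").  If the response is marked not valid, cmg and shared
 * are set to -1; otherwise the cmg-dependent characteristics are copied
 * via chsc_initialize_cmg_chars().
 */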
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

int __init chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

void __init chsc_free_sei_area(void)
{
	free_page((unsigned long)sei_page);
}
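
/*
 * Enable a CHSC facility through the command with request code 0x0031
 * (the code names the block "sda").  Response code 0x0101 is mapped to
 * -EOPNOTSUPP; everything else goes through chsc_error_from_response().
 */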
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	free_page((unsigned long)sda_area);
	return ret;
}
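
/*
 * Global copies of the general and CHSC characteristics of the channel
 * subsystem, filled in during initialization by
 * chsc_determine_css_characteristics() (request code 0x0010, "scsc")
 * and exported via EXPORT_SYMBOL_GPL at the end of this file.
 */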
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);