
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *   $Revision: 1.128 $
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
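
/*
 * All CHSC commands in this file follow the same pattern: build a
 * request block at the start of a zeroed 4K page (allocated with
 * GFP_DMA so it lies below 2 GB), issue chsc() on it, check the
 * condition code, then inspect the response code in the response
 * block.  Roughly:
 *
 *	area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	area->request.length = ...;	request block size
 *	area->request.code = ...;	CHSC command code
 *	cc = chsc(area);		cc > 0: busy or not operational
 *	if (!cc)
 *		switch (area->response.code) { ... }
 *	free_page((unsigned long)area);
 */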

static void *sei_page;

static int new_channel_path(int chpid);

static inline void
set_chp_logically_online(int chp, int onoff)
{
        css[0]->chps[chp]->state = onoff;
}

static int
get_chp_status(int chp)
{
        return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}
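
/*
 * A subchannel can be reached over up to eight channel paths; bit i
 * (mask 0x80 >> i) in the path masks corresponds to pmcw.chpid[i].
 * Remove every path whose chpid is logically offline (state 0) from
 * the subchannel's operational path mask.
 */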
void
chsc_validate_chpids(struct subchannel *sch)
{
        int mask, chp;

        for (chp = 0; chp <= 7; chp++) {
                mask = 0x80 >> chp;
                if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
                        /* disable using this path */
                        sch->opm &= ~mask;
        }
}
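
/*
 * Sanity check used in machine check handling: if the chpid is not
 * known at all, schedule a full rescan over the slow path; if it is
 * known but logically offline, warn.
 */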
void
chpid_is_actually_online(int chp)
{
        int state;

        state = get_chp_status(chp);
        if (state < 0) {
                need_rescan = 1;
                queue_work(slow_path_wq, &slow_path_work);
        } else
                WARN_ON(!state);
}
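
/*
 * Issue a store-subchannel-description CHSC (command code 0x0004) for
 * a single subchannel (first == last) and cache the returned chpid and
 * full-link-address information in sch->ssd_info.  Called with the
 * subchannel lock held; the caller provides the (zeroed) request page.
 */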
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;
        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } *ssd_area;

        ssd_area = page;

        ssd_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0004,
        };

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in
                 * the time since this code was written; since we don't
                 * know which fields have meaning and what to do with
                 * them, we just jump out.
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
                }
        }
        return 0;
}
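
/*
 * Fetch ssd information for a subchannel and, for every chpid found in
 * it that we have not seen before, allocate a channel path structure.
 * A failure to retrieve the ssd info is reported only once, not per
 * call, so the log is not flooded on machines without ssd support.
 */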
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(&sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(&sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, chpid;
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        chpid = sch->ssd_info.chpid[j];
                        if (chpid && (get_chp_status(chpid) < 0))
                                new_channel_path(chpid);
                }
        }
        return ret;
}
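
/*
 * Callback for bus_for_each_dev(): remove the channel path given by
 * 'data' from the subchannel.  Returning 0 keeps the bus iteration
 * going; subchannels whose device is gone or that have lost their
 * last usable path are put on the slow-path queue for re-evaluation.
 */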
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct channel_path *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++)
                if (sch->schib.pmcw.chpid[j] == chpid->id)
                        break;
        if (j >= 8)
                return 0;

        mask = 0x80 >> j;
        spin_lock(&sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;
        if (sch->vpm == mask)
                goto out_unreg;

        if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
                                     SCSW_ACTL_HALT_PEND |
                                     SCSW_ACTL_START_PEND |
                                     SCSW_ACTL_RESUME_PEND)) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc = cio_cancel(sch);

                if (cc == -ENODEV)
                        goto out_unreg;

                if (cc == -EINVAL) {
                        cc = cio_clear(sch);
                        if (cc == -ENODEV)
                                goto out_unreg;
                        /* Call handler. */
                        if (sch->driver && sch->driver->termination)
                                sch->driver->termination(&sch->dev);
                        goto out_unlock;
                }
        } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
                   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
                   (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
out_unlock:
        spin_unlock(&sch->lock);
        return 0;
out_unreg:
        spin_unlock(&sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}

static inline void
s390_set_chpid_offline( __u8 chpid)
{
        char dbf_txt[15];
        struct device *dev;

        sprintf(dbf_txt, "chpr%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (get_chp_status(chpid) <= 0)
                return;
        dev = get_device(&css[0]->chps[chpid]->dev);
        bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        put_device(dev);
}
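
/*
 * Scope of a resource-accessibility event: chp is the affected channel
 * path; fla/fla_mask narrow it down to a link address (fla_mask
 * 0xff00) or a full link address (fla_mask 0xffff), if the event
 * supplied one.
 */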
struct res_acc_data {
        struct channel_path *chp;
        u32 fla_mask;
        u16 fla;
};

static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chp->id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }
        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and, if necessary, check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static inline int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}
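
/*
 * for_each_subchannel() stops as soon as the callback returns nonzero.
 * A full link address identifies a single subchannel, so once we have
 * processed a match with fla_mask == 0xffff we return -ENODEV to cut
 * the scan short; with a partial link address we keep going.
 */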
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = (struct res_acc_data *)data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(&sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(&sch->lock);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(&sch->lock);
        put_device(&sch->dev);
        return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}

static int
s390_process_res_acc (struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x", res_data->chp->id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}
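
/*
 * Extract the chpid from byte 3 of the incident-node descriptor of a
 * link-incident record, after checking that the record and the
 * descriptor are valid.  Returns -EINVAL for records we cannot (or
 * should not) handle.
 */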
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } *lir;

        lir = (struct lir*) data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}
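
/*
 * Process pending channel-report words by repeatedly issuing a
 * store-event-information CHSC (command code 0x000e) until the
 * "more event information pending" flag (0x80) is no longer set.
 * Handles two content codes: link incidents (a chpid went away) and
 * resource accessibility events (paths may have come back).
 */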
int
chsc_process_crw(void)
{
        int chpid, ret;
        struct res_acc_data res_data;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8  flags;
                u8  vf;         /* validity flags */
                u8  rs;         /* reporting source */
                u8  cc;         /* content code */
                u16 fla;        /* full link address */
                u16 rsid;       /* reporting source id */
                u32 reserved5;
                u32 reserved6;
                u32 ccdf[96];   /* content-code dependent field */
                /* ccdf has to be big enough for a link-incident record */
        } *sei_area;

        if (!sei_page)
                return 0;
        /*
         * Build the chsc request block for store event information
         * and do the call.
         * This function is only called by the machine check handler
         * thread, so we don't need locking for the sei_page.
         */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                int ccode, status;
                struct device *dev;
                memset(sei_area, 0, sizeof(*sei_area));
                memset(&res_data, 0, sizeof(struct res_acc_data));
                sei_area->request = (struct chsc_header) {
                        .length = 0x0010,
                        .code = 0x000e,
                };

                ccode = chsc(sei_area);
                if (ccode > 0)
                        return 0;

                switch (sei_area->response.code) {
                        /* for debug purposes, check for problems */
                case 0x0001:
                        CIO_CRW_EVENT(4, "chsc_process_crw: event information "
                                      "successfully stored\n");
                        break; /* everything ok */
                case 0x0002:
                        CIO_CRW_EVENT(2,
                                      "chsc_process_crw: invalid command!\n");
                        return 0;
                case 0x0003:
                        CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
                                      "request block!\n");
                        return 0;
                case 0x0005:
                        CIO_CRW_EVENT(2, "chsc_process_crw: no event "
                                      "information stored\n");
                        return 0;
                default:
                        CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
                                      sei_area->response.code);
                        return 0;
                }

                /* Check if we might have lost some information. */
                if (sei_area->flags & 0x40)
                        CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
                                      "has been lost due to overflow!\n");

                if (sei_area->rs != 4) {
                        CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
                                      "(%04X) isn't a chpid!\n",
                                      sei_area->rsid);
                        continue;
                }

                /* which kind of information was stored? */
                switch (sei_area->cc) {
                case 1: /* link incident */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports link incident,"
                                      " reporting source is chpid %x\n",
                                      sei_area->rsid);
                        chpid = __get_chpid_from_lir(sei_area->ccdf);
                        if (chpid < 0)
                                CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
                                              __FUNCTION__);
                        else
                                s390_set_chpid_offline(chpid);
                        break;

                case 2: /* i/o resource accessibility */
                        CIO_CRW_EVENT(4, "chsc_process_crw: "
                                      "channel subsystem reports some I/O "
                                      "devices may have become accessible\n");
                        pr_debug("Data received after sei:\n");
                        pr_debug("Validity flags: %x\n", sei_area->vf);

                        /* allocate a new channel path structure, if needed */
                        status = get_chp_status(sei_area->rsid);
                        if (status < 0)
                                new_channel_path(sei_area->rsid);
                        else if (!status)
                                break;
                        dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
                        res_data.chp = to_channelpath(dev);
                        pr_debug("chpid: %x", sei_area->rsid);
                        if ((sei_area->vf & 0xc0) != 0) {
                                res_data.fla = sei_area->fla;
                                if ((sei_area->vf & 0xc0) == 0xc0) {
                                        pr_debug(" full link addr: %x",
                                                 sei_area->fla);
                                        res_data.fla_mask = 0xffff;
                                } else {
                                        pr_debug(" link addr: %x",
                                                 sei_area->fla);
                                        res_data.fla_mask = 0xff00;
                                }
                        }
                        ret = s390_process_res_acc(&res_data);
                        pr_debug("\n\n");
                        put_device(dev);
                        break;

                default: /* other stuff */
                        CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
                                      sei_area->cc);
                        break;
                }
        } while (sei_area->flags & 0x80);
        return ret;
}

static inline int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i;
        struct channel_path *chp;
        struct subchannel *sch;

        chp = (struct channel_path *)data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock(&sch->lock);
        for (i = 0; i < 8; i++)
                if (sch->schib.pmcw.chpid[i] == chp->id) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock(&sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        if (i == 8) {
                spin_unlock(&sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | 0x80 >> i) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock(&sch->lock);
        put_device(&sch->dev);
        return 0;
}

static int
chp_add(int chpid)
{
        int rc;
        char dbf_txt[15];
        struct device *dev;

        if (!get_chp_status(chpid))
                return 0; /* no need to do the rest */

        sprintf(dbf_txt, "cadd%x", chpid);
        CIO_TRACE_EVENT(2, dbf_txt);

        dev = get_device(&css[0]->chps[chpid]->dev);
        rc = for_each_subchannel(__chp_add, to_channelpath(dev));
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        put_device(dev);
        return rc;
}

/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
        if (on == 0) {
                /* Path has gone. We use the link incident routine. */
                s390_set_chpid_offline(chpid);
                return 0; /* De-register is async anyway. */
        }
        /*
         * Path has come. Allocate a new channel path structure,
         * if needed.
         */
        if (get_chp_status(chpid) < 0)
                new_channel_path(chpid);
        /* Avoid the extra overhead in s390_process_res_acc. */
        return chp_add(chpid);
}
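
/*
 * Helper for varying a chpid offline: if the subchannel is active and
 * the last path used (lpum) is the one being varied off, put the
 * device into the waiting state and report 1, so the caller grants
 * the running I/O a grace period instead of removing the path right
 * away.
 */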
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
        int cc;

        if (!device_is_online(sch))
                /* cio could be doing I/O. */
                return 0;
        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
                device_set_waiting(sch);
                return 1;
        }
        return 0;
}

static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(&sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                } else {
                        sch->opm &= ~(0x80 >> chp);
                        sch->lpm &= ~(0x80 >> chp);
                        /*
                         * Give running I/O a grace period in which it
                         * can successfully terminate, even using the
                         * just varied off path. Then kill it.
                         */
                        if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        } else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                }
                break;
        }
        spin_unlock_irqrestore(&sch->lock, flags);
}

static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        __u8 *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        __u8 *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid( __u8 chpid, int on)
{
        char dbf_text[15];
        int status;

        sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
        CIO_TRACE_EVENT(2, dbf_text);

        status = get_chp_status(chpid);
        if (status < 0) {
                printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
                return -EINVAL;
        }

        if (!on && !status) {
                printk(KERN_ERR "chpid %x is already offline\n", chpid);
                return -EINVAL;
        }

        set_chp_logically_online(chpid, on);

        /*
         * Redo PathVerification on the devices the chpid connects to
         */
        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
                sprintf(buf, "offline\n"));
}
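
/*
 * Writing "on" or "off" to the status attribute varies the chpid
 * logically online or offline.  From user space that looks roughly
 * like this (the exact sysfs path depends on the name of the css
 * device the chpid hangs off of):
 *
 *	echo on > /sys/devices/css0/chp0.4a/status
 */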
static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct channel_path *cp = container_of(dev, struct channel_path, dev);
        char cmd[10];
        int num_args;
        int error;

        num_args = sscanf(buf, "%5s", cmd);
        if (!num_args)
                return count;

        if (!strnicmp(cmd, "on", 2))
                error = s390_vary_chpid(cp->id, 1);
        else if (!strnicmp(cmd, "off", 3))
                error = s390_vary_chpid(cp->id, 0);
        else
                error = -EINVAL;

        return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct channel_path *chp = container_of(dev, struct channel_path, dev);

        if (!chp)
                return 0;
        return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_type.attr,
        NULL,
};

static struct attribute_group chp_attr_group = {
        .attrs = chp_attrs,
};

static void
chp_release(struct device *dev)
{
        struct channel_path *cp;

        cp = container_of(dev, struct channel_path, dev);
        kfree(cp);
}
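
/*
 * Issue a store-channel-path-description CHSC (command code 0x0002)
 * for a single chpid (first == last) and copy the returned descriptor
 * into 'desc'.
 */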
static int
chsc_determine_channel_path_description(int chpid,
                                        struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0002,
        };

        scpd_area->first_chpid = chpid;
        scpd_area->last_chpid = chpid;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
        struct channel_path *chp;
        int ret;

        chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
        if (!chp)
                return -ENOMEM;
        memset(chp, 0, sizeof(struct channel_path));

        /* fill in status, etc. */
        chp->id = chpid;
        chp->state = 1;
        chp->dev = (struct device) {
                .parent  = &css[0]->device,
                .release = chp_release,
        };
        snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

        /* Obtain channel path description and fill it in. */
        ret = chsc_determine_channel_path_description(chpid, &chp->desc);
        if (ret)
                goto out_free;

        /* make it known to the system */
        ret = device_register(&chp->dev);
        if (ret) {
                printk(KERN_WARNING "%s: could not register %02x\n",
                       __func__, chpid);
                /* The release function frees chp for us. */
                put_device(&chp->dev);
                return ret;
        }
        ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
        if (ret) {
                /* Unregistering drops the last reference and frees chp. */
                device_unregister(&chp->dev);
                return ret;
        }
        css[0]->chps[chpid] = chp;
        return 0;
out_free:
        kfree(chp);
        return ret;
}
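
/*
 * Return a kmalloc'ed copy of the descriptor of the channel path the
 * subchannel uses in slot chp_no, or NULL if the chpid is unknown or
 * no memory is available.  The caller is responsible for freeing the
 * copy.
 */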
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
        struct channel_path *chp;
        struct channel_path_desc *desc;

        chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
        if (!chp)
                return NULL;
        desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
        if (!desc)
                return NULL;
        memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
        return desc;
}
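
/*
 * The sei_page is allocated once at boot (see the subsys_initcall
 * below) and kept for the lifetime of the system; chsc_process_crw()
 * relies on it being there whenever a chsc machine check comes in.
 */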
static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}
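
/*
 * Enable a channel-subsystem facility (CHSC command code 0x0031).
 * The operation code selects the facility to be enabled; the request
 * block spans the full page (length 0x0400).
 */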
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request = (struct chsc_header) {
                .length = 0x0400,
                .code = 0x0031,
        };
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        }
out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
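
/*
 * Query the general and chsc characteristics of the channel subsystem
 * (store-channel-subsystem-characteristics, CHSC command code 0x0010)
 * and cache them in the two exported structures above.
 */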
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0010,
        };

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);