/*
 * drivers/s390/char/sclp_cmd.c
 *
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */
  8. #define KMSG_COMPONENT "sclp_cmd"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/completion.h>
  11. #include <linux/init.h>
  12. #include <linux/errno.h>
  13. #include <linux/slab.h>
  14. #include <linux/string.h>
  15. #include <linux/mm.h>
  16. #include <linux/mmzone.h>
  17. #include <linux/memory.h>
  18. #include <asm/chpid.h>
  19. #include <asm/sclp.h>
  20. #include "sclp.h"
/* SCLP command words for reading the SCP (machine) information. */
#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

/*
 * Response block for read SCP info. Byte offsets (given in the trailing
 * comments) are fixed by the SCLP interface; the reserved fields pad the
 * structure to exactly one 4K page, hence the PAGE_SIZE alignment.
 */
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
/* SCCB used for the early (pre-initcall) read SCP info request. */
static struct read_info_sccb __initdata early_read_info_sccb;
/* Non-zero once early_read_info_sccb holds a valid response. */
static int __initdata early_read_info_sccb_valid;

/* Facility bits reported by the SCP (see sclp_facilities_detect()). */
u64 sclp_facilities;
static u8 sclp_fac84;
/* Storage increment size in bytes and maximum increment number. */
static unsigned long long rzm;
static unsigned long long rnmax;
/*
 * Synchronously execute an SCLP command before the regular driver
 * infrastructure is up: enable the service-signal subclass mask
 * (control register 0, bit 9), issue the service call and wait for
 * completion by loading an enabled wait PSW.
 * Returns the return value of sclp_service_call().
 */
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	/*
	 * Wait for the external interrupt that signals completion of the
	 * service call, then continue with interrupts disabled again.
	 */
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
/*
 * Read the SCP information block into early_read_info_sccb. The
 * "forced" variant of the command is tried first; if the SCP answers
 * with response code 0x1f0, fall back to the plain read SCP info
 * command. Sets early_read_info_sccb_valid once a response with code
 * 0x10 has been received.
 */
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		/* Retry the command for as long as the SCLP is busy. */
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			/* Request a completion interrupt. */
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);
		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		/* Any response other than 0x1f0 ends the fallback chain. */
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}
/*
 * Cache the facility bits and the memory layout parameters reported
 * by the SCP. Called once from early setup code; all values stay zero
 * if the early read SCP info request did not succeed.
 */
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	/* Fall back to the wide fields when the narrow ones read zero. */
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;	/* increment size is reported in MB; convert to bytes */
}
/* Return the maximum storage increment number reported by the SCP. */
unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}
/* Return the storage increment size in bytes. */
unsigned long long sclp_get_rzm(void)
{
	return rzm;
}
/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	/* Flags bit 0x2 indicates that the machine provides dump support. */
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}
  123. static void sclp_sync_callback(struct sclp_req *req, void *data)
  124. {
  125. struct completion *completion = data;
  126. complete(completion);
  127. }
/*
 * Issue an SCLP request and block until the driver has processed it.
 * Completion is signalled through sclp_sync_callback().
 * Returns 0 on success, -ENOMEM if no request could be allocated, the
 * error returned by sclp_add_request(), or -EIO if the request did
 * not end in state SCLP_REQ_DONE.
 */
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
/*
 * CPU configuration related functions.
 */
#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

/*
 * Response block for read CPU info. The CPU entry lists start at the
 * byte offsets given in offset_configured/offset_standby.
 */
struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
  171. static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
  172. struct read_cpu_info_sccb *sccb)
  173. {
  174. char *page = (char *) sccb;
  175. memset(info, 0, sizeof(*info));
  176. info->configured = sccb->nr_configured;
  177. info->standby = sccb->nr_standby;
  178. info->combined = sccb->nr_configured + sccb->nr_standby;
  179. info->has_cpu_type = sclp_fac84 & 0x1;
  180. memcpy(&info->cpu, page + sccb->offset_configured,
  181. info->combined * sizeof(struct sclp_cpu_entry));
  182. }
/*
 * Retrieve the list of configured and standby CPUs from the SCP and
 * fill in @info. Returns 0 on success, -EOPNOTSUPP if the facility is
 * not available, -ENOMEM on allocation failure or -EIO if the command
 * did not complete with response code 0x0010.
 */
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	/* SCCBs are allocated with GFP_DMA throughout this file. */
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}
/* Minimal SCCB for the (de)configure CPU commands: header only. */
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
/*
 * Issue a configure or deconfigure CPU command and evaluate the
 * response. Returns 0 on success (response 0x0020 or 0x0120),
 * -EOPNOTSUPP if CPU reconfiguration is not available, -ENOMEM on
 * allocation failure or -EIO on any other response code.
 */
static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
/* Put the CPU with the given address into the configured state. */
int sclp_cpu_configure(u8 cpu)
{
	/* The CPU address is encoded into the command word. */
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
/* Put the CPU with the given address into the deconfigured state. */
int sclp_cpu_deconfigure(u8 cpu)
{
	/* The CPU address is encoded into the command word. */
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG

/* Serializes all memory state changes done from the notifier below. */
static DEFINE_MUTEX(sclp_mem_mutex);
/* List of all known memory increments, kept sorted by increment number. */
static LIST_HEAD(sclp_mem_list);
/* Highest storage element id seen so far and per-element attach bitmap. */
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];

/* One entry per storage increment. */
struct memory_increment {
	struct list_head list;
	u16 rn;		/* increment number (1-based) */
	int standby;	/* non-zero: increment is standby memory */
	int usecount;	/* reference count kept by sclp_mem_change_state() */
};

/* SCCB for the assign/unassign storage commands. */
struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;
  265. static unsigned long long rn2addr(u16 rn)
  266. {
  267. return (unsigned long long) (rn - 1) * rzm;
  268. }
/*
 * Issue an assign or unassign storage command for increment @rn.
 * Returns 0 on success (response 0x0020 or 0x0120), -ENOMEM on
 * allocation failure or -EIO on any other response code.
 */
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/* Assign (make usable) storage increment @rn. */
static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}
/* Unassign (put into standby) storage increment @rn. */
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
/*
 * Response block for attach storage element: a list of 32-bit entries
 * with the increment number in the upper 16 bits of each entry.
 */
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
  311. static int sclp_attach_storage(u8 id)
  312. {
  313. struct attach_storage_sccb *sccb;
  314. int rc;
  315. int i;
  316. sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
  317. if (!sccb)
  318. return -ENOMEM;
  319. sccb->header.length = PAGE_SIZE;
  320. rc = do_sync_request(0x00080001 | id << 8, sccb);
  321. if (rc)
  322. goto out;
  323. switch (sccb->header.response_code) {
  324. case 0x0020:
  325. set_bit(id, sclp_storage_ids);
  326. for (i = 0; i < sccb->assigned; i++)
  327. sclp_unassign_storage(sccb->entries[i] >> 16);
  328. break;
  329. default:
  330. rc = -EIO;
  331. break;
  332. }
  333. out:
  334. free_page((unsigned long) sccb);
  335. return rc;
  336. }
/*
 * Assign or unassign all storage increments overlapping the address
 * range [start, start + size). The increment list is sorted, so the
 * walk stops at the first increment starting beyond the range. A
 * per-increment usecount handles overlapping online regions.
 * Returns 0 on success or -EIO if any assign failed.
 */
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			/* Only the first user actually assigns. */
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			/* Only the last user actually unassigns. */
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}
/*
 * Memory hotplug notifier: translate section online/offline events
 * into assign/unassign storage requests. Before handling an event,
 * attach any storage elements that have not been attached yet so that
 * their increments are known.
 */
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	/* Attach storage elements that showed up since the last event. */
	for (id = 0; id <= sclp_max_storage_id; id++)
		if (!test_bit(id, sclp_storage_ids))
			sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		/* Nothing to do; state changes happen on the other events. */
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
/*
 * Coalesce consecutive increments into one region and hand it to
 * add_memory(). Static state accumulates a run of adjacent increment
 * numbers; a call with rn == 0 flushes the pending run.
 */
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		/* rn extends the current run. */
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	/* Clip the region against the maximum supported address. */
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	add_memory(0, start, size);
skip_add:
	/* Start a new run with rn. */
	first_rn = rn;
	num = 1;
}
  426. static void __init sclp_add_standby_memory(void)
  427. {
  428. struct memory_increment *incr;
  429. list_for_each_entry(incr, &sclp_mem_list, list)
  430. if (incr->standby)
  431. add_memory_merged(incr->rn);
  432. add_memory_merged(0);
  433. }
/*
 * Insert a new memory increment into the sorted increment list.
 * With @assigned set, @rn is the real increment number and the entry
 * is sorted in by rn. Without @assigned, the entry fills the first
 * gap in the numbering: it gets the number following the last entry
 * before that gap. Entries with a number beyond rnmax are dropped.
 */
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	/* Find the insertion point (and, if !assigned, the first gap). */
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
/*
 * Response block for read storage element information: a list of
 * 32-bit entries with the increment number in the upper 16 bits.
 */
struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;
/*
 * Initcall: query all storage elements, build the increment list and,
 * if anything was found, register the memory hotplug notifier and
 * announce the standby regions to the memory hotplug core.
 *
 * Response code handling: 0x0010 and 0x0410 responses carry increment
 * lists (assigned resp. standby increments); 0x0310 is skipped
 * silently; anything else is an error.
 */
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	/* All three facility bits must be set for memory hotplug. */
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	/* sclp_max_storage_id may grow while we learn about new elements. */
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	/* Fill numbering gaps up to rnmax with standby increments. */
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * Channel path configuration related functions.
 */
#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

/* SCCB for the (de)configure channel-path commands. */
struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
  542. static int do_chp_configure(sclp_cmdw_t cmd)
  543. {
  544. struct chp_cfg_sccb *sccb;
  545. int rc;
  546. if (!SCLP_HAS_CHP_RECONFIG)
  547. return -EOPNOTSUPP;
  548. /* Prepare sccb. */
  549. sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
  550. if (!sccb)
  551. return -ENOMEM;
  552. sccb->header.length = sizeof(*sccb);
  553. rc = do_sync_request(cmd, sccb);
  554. if (rc)
  555. goto out;
  556. switch (sccb->header.response_code) {
  557. case 0x0020:
  558. case 0x0120:
  559. case 0x0440:
  560. case 0x0450:
  561. break;
  562. default:
  563. pr_warning("configure channel-path failed "
  564. "(cmd=0x%08x, response=0x%04x)\n", cmd,
  565. sccb->header.response_code);
  566. rc = -EIO;
  567. break;
  568. }
  569. out:
  570. free_page((unsigned long) sccb);
  571. return rc;
  572. }
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path command sclp command for specified chpid.
 * Return 0 after command successfully finished, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	/* The channel-path id is encoded into the command word. */
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path command sclp command for specified chpid
 * and wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	/* The channel-path id is encoded into the command word. */
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
/* Response block for the read channel-path information command. */
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	/* Only response code 0x0010 indicates valid data. */
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	/* Copy the three channel-path masks out of the response block. */
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}