sclp_cmd.c

/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/ctl_reg.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16 rnmax;			/* 8-9 */
	u8 rnsize;			/* 10 */
	u8 _reserved0[24 - 11];		/* 11-23 */
	u8 loadparm[8];			/* 24-31 */
	u8 _reserved1[48 - 32];		/* 32-47 */
	u64 facilities;			/* 48-55 */
	u8 _reserved2[84 - 56];		/* 56-83 */
	u8 fac84;			/* 84 */
	u8 fac85;			/* 85 */
	u8 _reserved3[91 - 86];		/* 86-90 */
	u8 flags;			/* 91 */
	u8 _reserved4[100 - 92];	/* 92-99 */
	u32 rnsize2;			/* 100-103 */
	u64 rnmax2;			/* 104-111 */
	u8 _reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
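
/*
 * Synchronously issue an SCLP command before the interrupt-driven driver
 * core is available: enable the service-signal subclass mask (control
 * register 0, bit 9), start the service call, and wait for its external
 * interrupt by loading an enabled wait PSW.
 */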
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
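
/*
 * Fill the static early sccb with the READ SCP INFO response. The forced
 * variant of the command is tried first; if it is rejected with response
 * code 0x01f0, fall back to the plain READ SCP INFO command. Requests that
 * fail with -EBUSY are retried; response code 0x0010 marks the sccb
 * contents as valid.
 */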
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}
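
/*
 * Cache the facility bits and the memory layout parameters from the early
 * READ SCP INFO response: rnmax is the maximum storage increment number,
 * rzm the increment size (reported in MB, stored here in bytes). The short
 * rnmax/rnsize fields are used when non-zero, otherwise the wider
 * rnmax2/rnsize2 fields.
 */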
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	sclp_fac85 = sccb->fac85;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

u8 sclp_get_fac85(void)
{
	return sclp_fac85;
}
EXPORT_SYMBOL_GPL(sclp_get_fac85);

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}
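
/*
 * Issue an SCLP request and block until it completes. sclp_sync_callback()
 * fires from interrupt context and wakes up the waiting task; any final
 * status other than SCLP_REQ_DONE is reported as -EIO.
 */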
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct sccb_header header;
	u16 nr_configured;
	u16 offset_configured;
	u16 nr_standby;
	u16 offset_standby;
	u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
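
/*
 * Convert a READ CPU INFO response into struct sclp_cpu_info. The
 * configured and standby entry arrays are expected back to back in the
 * sccb, so the combined list is copied in one go starting at
 * offset_configured.
 */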
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
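
/*
 * The CPU address is shifted into bits 8-15 of the command word; the same
 * encoding is used for storage element and channel-path IDs elsewhere in
 * this file.
 */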
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(rzm);
}
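
/* Increment numbers are 1-based: increment rn starts at (rn - 1) * rzm. */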
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
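
/*
 * Assign a storage increment and set the storage key of every page in it
 * to PAGE_DEFAULT_KEY, so newly added memory starts out with well-defined
 * keys.
 */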
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start, address;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		goto out;
	start = address = rn2addr(rn);
	for (; address < start + rzm; address += PAGE_SIZE)
		page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
out:
	return rc;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
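
/*
 * Switch all increments that overlap [start, start + size) between the
 * assigned and unassigned state. An increment may be shared by adjacent
 * memory blocks, so it is reference counted and only assigned for the
 * first user and unassigned for the last.
 */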
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
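
/*
 * Merge runs of consecutive standby increments into a single add_memory()
 * call, clamped to VMEM_MAX_PHYS and to a mem= limit if one is set.
 * Callers pass increment numbers in ascending order; a call with rn == 0
 * flushes the pending run.
 */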
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
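
/*
 * Insert a new increment into sclp_mem_list, which is kept sorted by
 * increment number. For increments whose number is unknown (assigned == 0)
 * the first gap in the numbering is used instead of rn.
 */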
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	if (!standby)
		new_incr->usecount = 1;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
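
/*
 * Refuse to suspend if the memory hotplug state was changed since boot,
 * since the standby memory configuration could not be re-established on
 * resume.
 */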
static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
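
/*
 * Query each storage element with READ STORAGE INFORMATION: response code
 * 0x0010 lists assigned increments, 0x0410 standby increments. Remaining
 * gaps in the increment numbering are filled with unassigned standby
 * increments before the memory notifier and the suspend/resume platform
 * device are registered.
 */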
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
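
/*
 * Common helper for the configure/deconfigure channel-path commands.
 * Besides the usual 0x0020/0x0120 success codes, response codes 0x0440
 * and 0x0450 are accepted without raising an error.
 */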
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information sclp command and wait for
 * completion. On success, store the channel-path information in @info and
 * return 0. Return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}