sclp_early.c

/*
 * SCLP early driver
 *
 * Copyright IBM Corp. 2013
 */

#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO         0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED  0x00120001

struct read_info_sccb {
        struct sccb_header header;      /* 0-7 */
        u16 rnmax;                      /* 8-9 */
        u8 rnsize;                      /* 10 */
        u8 _reserved0[24 - 11];         /* 11-23 */
        u8 loadparm[8];                 /* 24-31 */
        u8 _reserved1[48 - 32];         /* 32-47 */
        u64 facilities;                 /* 48-55 */
        u8 _reserved2[84 - 56];         /* 56-83 */
        u8 fac84;                       /* 84 */
        u8 fac85;                       /* 85 */
        u8 _reserved3[91 - 86];         /* 86-90 */
        u8 flags;                       /* 91 */
        u8 _reserved4[100 - 92];        /* 92-99 */
        u32 rnsize2;                    /* 100-103 */
        u64 rnmax2;                     /* 104-111 */
        u8 _reserved5[4096 - 112];      /* 112-4095 */
} __packed __aligned(PAGE_SIZE);

static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE);
static __initdata struct read_info_sccb early_read_info_sccb;
static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
static unsigned long sclp_hsa_size;

__initdata int sclp_early_read_info_sccb_valid;
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
unsigned long long sclp_rnmax;
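
/*
 * Synchronously issue an SCLP service call: enable service-signal
 * external interruptions (__ctl_set_bit(0, 9)), start the request and
 * load an enabled wait PSW until the completion interrupt arrives, then
 * disable interrupts again before returning.
 */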
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        __ctl_set_bit(0, 9);
        rc = sclp_service_call(cmd, sccb);
        if (rc)
                goto out;
        __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
                        PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
        local_irq_disable();
out:
        /* Contents of the sccb might have changed. */
        barrier();
        __ctl_clear_bit(0, 9);
        return rc;
}
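
/*
 * Fill early_read_info_sccb via READ SCP INFO.  The "forced" command word
 * is tried first and each command is retried while the service call is
 * busy.  A response code of 0x10 marks the SCCB contents as valid; 0x1f0
 * (apparently an unsupported command) makes the loop fall back to the
 * plain READ SCP INFO command.
 */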
static void __init sclp_read_info_early(void)
{
        int rc;
        int i;
        struct read_info_sccb *sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};

        sccb = &early_read_info_sccb;
        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                do {
                        memset(sccb, 0, sizeof(*sccb));
                        sccb->header.length = sizeof(*sccb);
                        sccb->header.function_code = 0x80;
                        sccb->header.control_mask[2] = 0x80;
                        rc = sclp_cmd_sync_early(commands[i], sccb);
                } while (rc == -EBUSY);

                if (rc)
                        break;
                if (sccb->header.response_code == 0x10) {
                        sclp_early_read_info_sccb_valid = 1;
                        break;
                }
                if (sccb->header.response_code != 0x1f0)
                        break;
        }
}
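
/*
 * Copy the facility bits and the storage increment geometry out of the
 * READ SCP INFO response.  The wide rnmax2/rnsize2 fields are used when
 * the narrow rnmax/rnsize fields are zero; rnsize is in megabytes, hence
 * the shift by 20 to get bytes.
 */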
static void __init sclp_facilities_detect(void)
{
        struct read_info_sccb *sccb;

        sclp_read_info_early();
        if (!sclp_early_read_info_sccb_valid)
                return;

        sccb = &early_read_info_sccb;
        sclp_facilities = sccb->facilities;
        sclp_fac84 = sccb->fac84;
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp_rzm <<= 20;
}
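
/*
 * Console capability checks based on the masks returned in
 * early_event_mask_sccb: line mode needs OPCMD or PMSGCMD in the send
 * mask plus MSG or PMSGCMD in the receive mask; the VT220 console only
 * needs VT220MSG in the send mask.
 */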
bool __init sclp_has_linemode(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return 0;
        if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
                return 0;
        if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
                return 0;
        return 1;
}

bool __init sclp_has_vt220(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return 0;
        if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
                return 1;
        return 0;
}
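
/* Accessors for the storage increment geometry detected above. */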
unsigned long long sclp_get_rnmax(void)
{
        return sclp_rnmax;
}

unsigned long long sclp_get_rzm(void)
{
        return sclp_rzm;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
        struct read_info_sccb *sccb;

        if (!sclp_early_read_info_sccb_valid)
                return;
        sccb = &early_read_info_sccb;
        info->is_valid = 1;
        if (sccb->flags & 0x2)
                info->has_dump = 1;
        memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}
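
/*
 * Like sclp_cmd_sync_early(), but retries while busy and maps anything
 * other than a 0x0020 (normal completion) response code to -EIO.
 */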
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        do {
                rc = sclp_cmd_sync_early(cmd, sccb);
        } while (rc == -EBUSY);

        if (rc)
                return -EIO;
        if (((struct sccb_header *) sccb)->response_code != 0x0020)
                return -EIO;
        return 0;
}
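
/*
 * Prepare an SDIAS event buffer that queries the HSA size (event
 * qualifier SDIAS_EQ_SIZE, data id SDIAS_DI_FCP_DUMP); dbs = 1 requests
 * the 4 KB block size that the PAGE_SIZE arithmetic below assumes.
 */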
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
        memset(sccb, 0, sizeof(*sccb));

        sccb->hdr.length = sizeof(*sccb);
        sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
        sccb->evbuf.hdr.type = EVTYP_SDIAS;
        sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
        sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
        sccb->evbuf.event_id = 4712;
        sccb->evbuf.dbs = 1;
}
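
/* Issue WRITE EVENT MASK with the given receive and send masks. */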
static int __init sclp_set_event_mask(unsigned long receive_mask,
                                      unsigned long send_mask)
{
        struct init_sccb *sccb = (void *) &sccb_early;

        memset(sccb, 0, sizeof(*sccb));
        sccb->header.length = sizeof(*sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
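
/*
 * Ask for the HSA size via the SDIAS size query prepared above.  A
 * non-zero block count is converted to bytes as (blk_cnt - 1) pages.
 */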
static long __init sclp_hsa_size_init(void)
{
        struct sdias_sccb *sccb = (void *) &sccb_early;

        sccb_init_eq_size(sccb);
        if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
                return -EIO;
        if (sccb->evbuf.blk_cnt != 0)
                return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
        return 0;
}
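
/*
 * For the asynchronous case the block count is delivered in a separate
 * event: fetch it with READ EVENT DATA and convert it the same way.
 */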
static long __init sclp_hsa_copy_wait(void)
{
        struct sccb_header *sccb = (void *) &sccb_early;

        memset(sccb, 0, PAGE_SIZE);
        sccb->length = PAGE_SIZE;
        if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
                return -EIO;
        return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

unsigned long sclp_get_hsa_size(void)
{
        return sclp_hsa_size;
}
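
/*
 * Detect the HSA size.  The synchronous interface (LPAR) is tried first;
 * if it reports a size of zero, the asynchronous interface (z/VM) is
 * tried.  Going by the mask definitions in sclp.h, 0x40000010 should be
 * EVTYP_MSG_MASK | EVTYP_SDIAS_MASK and 0x00000010 EVTYP_SDIAS_MASK.
 */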
static void __init sclp_hsa_size_detect(void)
{
        long size;

        /* First try synchronous interface (LPAR) */
        if (sclp_set_event_mask(0, 0x40000010))
                return;
        size = sclp_hsa_size_init();
        if (size < 0)
                return;
        if (size != 0)
                goto out;
        /* Then try asynchronous interface (z/VM) */
        if (sclp_set_event_mask(0x00000010, 0x40000010))
                return;
        size = sclp_hsa_size_init();
        if (size < 0)
                return;
        size = sclp_hsa_copy_wait();
        if (size < 0)
                return;
out:
        sclp_hsa_size = size;
}
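
/*
 * Early SCLP detection entry point: read the SCP information, determine
 * the HSA size and switch event notifications back off for the rest of
 * the early boot phase.
 */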
void __init sclp_early_detect(void)
{
        sclp_facilities_detect();
        sclp_hsa_size_detect();
        sclp_set_event_mask(0, 0);
}