/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
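/*
 * Illustrative sketch, not part of this driver: how a caller might wrap
 * sclp_service_call(). Condition code 2 ("busy", mapped to -EBUSY above)
 * is worth retrying, cc 3 ("not operational", -EIO) is not; on cc 0 the
 * command was merely accepted, and completion is signaled later by a
 * service-signal external interrupt. The helper name is hypothetical.
 *
 *	static int example_servc_retry(sclp_cmdw_t cmd, void *sccb)
 *	{
 *		int rc;
 *
 *		do {
 *			rc = sclp_service_call(cmd, sccb);
 *			if (rc == -EBUSY)
 *				cpu_relax();
 *		} while (rc == -EBUSY);
 *		return rc;
 *	}
 */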
static inline void __sclp_make_read_req(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
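/*
 * Illustrative sketch, not part of this driver: queuing a request and
 * waiting for its completion callback. Everything named example_* is
 * hypothetical, <linux/completion.h> would be needed, and the SCCB must
 * be page-aligned like the static buffers above. The callback also fires
 * when the driver aborts the request, so checking req.status afterwards
 * covers both outcomes.
 *
 *	static void example_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	static int example_send(sclp_cmdw_t cmd, void *sccb)
 *	{
 *		struct sclp_req req;
 *		DECLARE_COMPLETION_ONSTACK(done);
 *
 *		memset(&req, 0, sizeof(req));
 *		req.command = cmd;
 *		req.sccb = sccb;
 *		req.status = SCLP_REQ_FILLED;
 *		req.callback = example_cb;
 *		req.callback_data = &done;
 *		if (sclp_add_request(&req))
 *			return -EIO;
 *		wait_for_completion(&done);
 *		return req.status == SCLP_REQ_DONE ? 0 : -EIO;
 *	}
 */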
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
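/* Note on the mask test above: event type t occupies mask bit 1 << (32 - t)
 * of the 32-bit event mask. For example, assuming the state-change event
 * type is 0x08 (as in sclp.h of this vintage), it maps to 1 << 24, i.e.
 * 0x01000000, matching the EVTYP_STATECHANGE_MASK used further below. */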
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
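/* Worked example (s390 TOD format: bit 51 is incremented once per
 * microsecond, so 2^32 TOD units ~= 2^20 us ~= 1.05 s): with HZ == 100,
 * an interval of 500 jiffies is 5 seconds and converts to 5ULL << 32 TOD
 * ticks. The approximation runs about 5% long, which is harmless here
 * because the result is only used as a timeout bound in sclp_sync_wait(). */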
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);
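/*
 * Illustrative sketch, not part of this driver: busy-waiting on a request
 * from a context that must not sleep, mirroring the pattern used by
 * sclp_init_mask() below. The request variable is hypothetical.
 *
 *	while (example_req.status != SCLP_REQ_DONE &&
 *	       example_req.status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 */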
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
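/*
 * Illustrative sketch, not part of this driver: registering a minimal
 * event listener. The names and the mask constant are hypothetical
 * placeholders; real callers pass one of the EVTYP_*_MASK values from
 * sclp.h, and sclp_register() fails with -EBUSY if the mask collides
 * with an already registered listener.
 *
 *	static void example_receiver_fn(struct evbuf_header *evbuf)
 *	{
 *		// consume the event, then set the processed flag (0x80)
 *		evbuf->flags |= 0x80;
 *	}
 *
 *	static struct sclp_register example_event = {
 *		.receive_mask = EVTYP_EXAMPLE_MASK,	// hypothetical
 *		.receiver_fn = example_receiver_fn
 *	};
 *
 *	rc = sclp_register(&example_event);
 */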
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
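/*
 * Illustrative sketch, not part of this driver: compacting an SCCB once
 * an event buffer has been consumed. The processed flag is bit 0x80 of
 * evbuf->flags, as tested above; the variable names are hypothetical.
 *
 *	evbuf->flags |= 0x80;			// mark this buffer processed
 *	remaining = sclp_remove_processed(sccb);
 *	if (remaining == 0)
 *		;	// SCCB now holds no event buffers and can be reused
 */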
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}

static __init int sclp_initcall(void)
{
	return sclp_init();
}

arch_initcall(sclp_initcall);