
/*
 * drivers/s390/char/sclp.c
 *   core function to access sclp interface
 *
 * S390 version
 *   Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	15

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        __asm__ __volatile__(
                "   .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "   ipm   %0\n"
                "   srl   %0,28"
                : "=&d" (cc)
                : "d" (command), "a" (__pa(sccb))
                : "cc", "memory" );
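        /* Condition code 2 means the SCLP is busy, condition code 3 means
         * it is not operational; 0 indicates the command was accepted. */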
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        if (data) {
                spin_lock_irqsave(&sclp_lock, flags);
                sclp_running_state = sclp_running_state_idle;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        sclp_process_queue();
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed. */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if ((sclp_init_state != sclp_init_state_initialized ||
             sclp_activation_state != sclp_activation_state_active) &&
            req != &sclp_init_req) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (req->list.prev == &sclp_req_queue) {
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
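
/*
 * Typical usage (a minimal sketch, not taken from this file; "my_cb" and
 * the completion are hypothetical; the SCCB should fit in a single page,
 * as the driver's own page-aligned buffers above do):
 *
 *	static void my_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	req->command = ...;	// an SCLP command word
 *	req->sccb = ...;	// caller-provided SCCB
 *	req->callback = my_cb;
 *	req->callback_data = &done;
 *	if (sclp_add_request(req) == 0)
 *		wait_for_completion(&done);
 */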

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                /* Search for event handler */
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
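                        /* Event type n corresponds to mask bit
                         * 1 << (32 - n), i.e. type 1 maps to the most
                         * significant bit of the sccb mask. */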
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -ENOSYS;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READDATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        spin_lock(&sclp_lock);
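        /* The interruption parameter word carries the address of the
         * finished SCCB in its upper bits (SCCBs are 8-byte aligned) and
         * event-buffer-pending indications in its low-order bits. */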
        finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
        evbuf_pending = S390_lowcore.ext_params & 0x3;
        if (finished_sccb) {
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending && sclp_receive_mask != 0 &&
            sclp_reading_state == sclp_reading_state_idle &&
            sclp_activation_state == sclp_activation_state_active) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Return current Time-Of-Day clock. */
static inline u64
sclp_get_clock(void)
{
        u64 result;

        asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
        return result;
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}
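
/*
 * Note on the arithmetic above: TOD-clock bit 51 ticks once per
 * microsecond, so one second equals 10^6 * 2^12 = 4.096e9 TOD units,
 * while the shift by 32 yields 2^32 = 4.295e9 units per second. The
 * conversion thus overestimates intervals by about 5%, which is
 * accurate enough for the coarse timeout checks in sclp_sync_wait().
 */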

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long psw_mask;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = sclp_get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
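        /* Bit 0x200 in CR0 is the service-signal external-interrupt
         * subclass mask (the same bit set via ctl_set_bit(0, 9) during
         * init); the AND mask below clears the remaining external
         * subclasses, including the clock comparator and CPU timer, so
         * that no timer interrupts are delivered while we poll. */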
        cr0_sync |= 0x00000200;
        cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);
        asm volatile ("STOSM 0(%1),0x01"
                      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    sclp_get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                barrier();
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        _local_bh_enable();
        local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static inline void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
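
        /* Listener callbacks must run without sclp_lock held and may
         * add or remove list entries, so notify one changed listener at
         * a time and restart the list walk after each callback. */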
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->receive_mask & sclp_receive_mask;
                        send_mask = reg->send_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header	header;
        u8		validity_sclp_active_facility_mask : 1;
        u8		validity_sclp_receive_mask : 1;
        u8		validity_sclp_send_mask : 1;
        u8		validity_read_data_function_mask : 1;
        u16		_zeros : 12;
        u16		mask_length;
        u64		sclp_active_facility_mask;
        sccb_mask_t	sclp_receive_mask;
        sccb_mask_t	sclp_send_mask;
        u32		read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EvTyp_StateChange_Mask,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);
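
/*
 * Minimal registration sketch (hypothetical listener; the event mask
 * constant is assumed to come from sclp.h, as EvTyp_StateChange_Mask
 * does above):
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EvTyp_Msg_Mask,
 *		.receiver_fn = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 *	// rc == -EBUSY indicates a mask collision with another listener
 */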

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
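
/*
 * Note: processed buffers are removed by copying the remainder of the
 * SCCB over them, so callers must not keep pointers into the buffer
 * across a call to sclp_remove_processed().
 */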

struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITEMASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}
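
/*
 * The WRITEMASK exchange works in both directions: we announce in
 * receive_mask and send_mask which event types we want, and the SCLP
 * fills in sclp_receive_mask and sclp_send_mask with the types it will
 * actually deliver and accept (read back in sclp_init_mask() below).
 */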

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(struct pt_regs *regs, __u16 code)
{
        u32 finished_sccb;

        finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
                printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
                       "for buffer at 0x%x\n", finished_sccb);
                return;
        }
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Register temporary handler for service-signal interrupts */
        rc = register_early_external_interrupt(0x2401, sclp_check_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_set_bit(0, 9);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_clear_bit(0, 9);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_early_external_interrupt(0x2401, sclp_check_handler,
                                            &ext_int_info_hwc);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc;

        if (!MACHINE_HAS_SCLP)
                return -ENODEV;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return 0;
        }
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc) {
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc) {
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        /* Register interrupt handler */
        rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                unregister_reboot_notifier(&sclp_reboot_notifier);
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        ctl_set_bit(0, 9);
        sclp_init_mask(1);
        return 0;
}