sclp.c

  1. /*
  2. * core function to access sclp interface
  3. *
  4. * Copyright IBM Corp. 1999, 2009
  5. *
  6. * Author(s): Martin Peschke <mpeschke@de.ibm.com>
  7. * Martin Schwidefsky <schwidefsky@de.ibm.com>
  8. */
  9. #include <linux/kernel_stat.h>
  10. #include <linux/module.h>
  11. #include <linux/err.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/timer.h>
  15. #include <linux/reboot.h>
  16. #include <linux/jiffies.h>
  17. #include <linux/init.h>
  18. #include <linux/suspend.h>
  19. #include <linux/completion.h>
  20. #include <linux/platform_device.h>
  21. #include <asm/types.h>
  22. #include <asm/irq.h>
  23. #include "sclp.h"
  24. #define SCLP_HEADER "sclp: "
  25. /* Lock to protect internal data consistency. */
  26. static DEFINE_SPINLOCK(sclp_lock);
  27. /* Mask of events that we can send to the sclp interface. */
  28. static sccb_mask_t sclp_receive_mask;
  29. /* Mask of events that we can receive from the sclp interface. */
  30. static sccb_mask_t sclp_send_mask;
  31. /* List of registered event listeners and senders. */
  32. static struct list_head sclp_reg_list;
  33. /* List of queued requests. */
  34. static struct list_head sclp_req_queue;
35. /* Data for read and init requests. */
  36. static struct sclp_req sclp_read_req;
  37. static struct sclp_req sclp_init_req;
  38. static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  39. static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  40. /* Suspend request */
  41. static DECLARE_COMPLETION(sclp_request_queue_flushed);
  42. static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
  43. {
  44. complete(&sclp_request_queue_flushed);
  45. }
  46. static struct sclp_req sclp_suspend_req;
  47. /* Timer for request retries. */
  48. static struct timer_list sclp_request_timer;
  49. /* Internal state: is the driver initialized? */
  50. static volatile enum sclp_init_state_t {
  51. sclp_init_state_uninitialized,
  52. sclp_init_state_initializing,
  53. sclp_init_state_initialized
  54. } sclp_init_state = sclp_init_state_uninitialized;
  55. /* Internal state: is a request active at the sclp? */
  56. static volatile enum sclp_running_state_t {
  57. sclp_running_state_idle,
  58. sclp_running_state_running,
  59. sclp_running_state_reset_pending
  60. } sclp_running_state = sclp_running_state_idle;
  61. /* Internal state: is a read request pending? */
  62. static volatile enum sclp_reading_state_t {
  63. sclp_reading_state_idle,
  64. sclp_reading_state_reading
  65. } sclp_reading_state = sclp_reading_state_idle;
  66. /* Internal state: is the driver currently serving requests? */
  67. static volatile enum sclp_activation_state_t {
  68. sclp_activation_state_active,
  69. sclp_activation_state_deactivating,
  70. sclp_activation_state_inactive,
  71. sclp_activation_state_activating
  72. } sclp_activation_state = sclp_activation_state_active;
  73. /* Internal state: is an init mask request pending? */
  74. static volatile enum sclp_mask_state_t {
  75. sclp_mask_state_idle,
  76. sclp_mask_state_initializing
  77. } sclp_mask_state = sclp_mask_state_idle;
  78. /* Internal state: is the driver suspended? */
  79. static enum sclp_suspend_state_t {
  80. sclp_suspend_state_running,
  81. sclp_suspend_state_suspended,
  82. } sclp_suspend_state = sclp_suspend_state_running;
  83. /* Maximum retry counts */
  84. #define SCLP_INIT_RETRY 3
  85. #define SCLP_MASK_RETRY 3
86. /* Timeout intervals in seconds. */
  87. #define SCLP_BUSY_INTERVAL 10
  88. #define SCLP_RETRY_INTERVAL 30
  89. static void sclp_process_queue(void);
  90. static void __sclp_make_read_req(void);
  91. static int sclp_init_mask(int calculate);
  92. static int sclp_init(void);
  93. /* Perform service call. Return 0 on success, non-zero otherwise. */
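/* Added note (editorial, hedged): the SERVC instruction below only initiates the
 * command; completion is signalled later by a service-signal external
 * interruption. Condition code 2 (SCLP still busy with a previous command) is
 * mapped to -EBUSY, condition code 3 (SCLP not operational) to -EIO. */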
  94. int
  95. sclp_service_call(sclp_cmdw_t command, void *sccb)
  96. {
  97. int cc;
  98. asm volatile(
  99. " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
  100. " ipm %0\n"
  101. " srl %0,28"
  102. : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
  103. : "cc", "memory");
  104. if (cc == 3)
  105. return -EIO;
  106. if (cc == 2)
  107. return -EBUSY;
  108. return 0;
  109. }
  110. static void
  111. __sclp_queue_read_req(void)
  112. {
  113. if (sclp_reading_state == sclp_reading_state_idle) {
  114. sclp_reading_state = sclp_reading_state_reading;
  115. __sclp_make_read_req();
  116. /* Add request to head of queue */
  117. list_add(&sclp_read_req.list, &sclp_req_queue);
  118. }
  119. }
  120. /* Set up request retry timer. Called while sclp_lock is locked. */
  121. static inline void
  122. __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
  123. unsigned long data)
  124. {
  125. del_timer(&sclp_request_timer);
  126. sclp_request_timer.function = function;
  127. sclp_request_timer.data = data;
  128. sclp_request_timer.expires = jiffies + time;
  129. add_timer(&sclp_request_timer);
  130. }
  131. /* Request timeout handler. Restart the request queue. If DATA is non-zero,
  132. * force restart of running request. */
  133. static void
  134. sclp_request_timeout(unsigned long data)
  135. {
  136. unsigned long flags;
  137. spin_lock_irqsave(&sclp_lock, flags);
  138. if (data) {
  139. if (sclp_running_state == sclp_running_state_running) {
  140. /* Break running state and queue NOP read event request
  141. * to get a defined interface state. */
  142. __sclp_queue_read_req();
  143. sclp_running_state = sclp_running_state_idle;
  144. }
  145. } else {
  146. __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
  147. sclp_request_timeout, 0);
  148. }
  149. spin_unlock_irqrestore(&sclp_lock, flags);
  150. sclp_process_queue();
  151. }
  152. /* Try to start a request. Return zero if the request was successfully
  153. * started or if it will be started at a later time. Return non-zero otherwise.
  154. * Called while sclp_lock is locked. */
  155. static int
  156. __sclp_start_request(struct sclp_req *req)
  157. {
  158. int rc;
  159. if (sclp_running_state != sclp_running_state_idle)
  160. return 0;
  161. del_timer(&sclp_request_timer);
  162. rc = sclp_service_call(req->command, req->sccb);
  163. req->start_count++;
  164. if (rc == 0) {
  165. /* Successfully started request */
  166. req->status = SCLP_REQ_RUNNING;
  167. sclp_running_state = sclp_running_state_running;
  168. __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
  169. sclp_request_timeout, 1);
  170. return 0;
  171. } else if (rc == -EBUSY) {
  172. /* Try again later */
  173. __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
  174. sclp_request_timeout, 0);
  175. return 0;
  176. }
  177. /* Request failed */
  178. req->status = SCLP_REQ_FAILED;
  179. return rc;
  180. }
  181. /* Try to start queued requests. */
  182. static void
  183. sclp_process_queue(void)
  184. {
  185. struct sclp_req *req;
  186. int rc;
  187. unsigned long flags;
  188. spin_lock_irqsave(&sclp_lock, flags);
  189. if (sclp_running_state != sclp_running_state_idle) {
  190. spin_unlock_irqrestore(&sclp_lock, flags);
  191. return;
  192. }
  193. del_timer(&sclp_request_timer);
  194. while (!list_empty(&sclp_req_queue)) {
  195. req = list_entry(sclp_req_queue.next, struct sclp_req, list);
  196. if (!req->sccb)
  197. goto do_post;
  198. rc = __sclp_start_request(req);
  199. if (rc == 0)
  200. break;
  201. /* Request failed */
  202. if (req->start_count > 1) {
  203. /* Cannot abort already submitted request - could still
  204. * be active at the SCLP */
  205. __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
  206. sclp_request_timeout, 0);
  207. break;
  208. }
  209. do_post:
  210. /* Post-processing for aborted request */
  211. list_del(&req->list);
  212. if (req->callback) {
  213. spin_unlock_irqrestore(&sclp_lock, flags);
  214. req->callback(req, req->callback_data);
  215. spin_lock_irqsave(&sclp_lock, flags);
  216. }
  217. }
  218. spin_unlock_irqrestore(&sclp_lock, flags);
  219. }
  220. static int __sclp_can_add_request(struct sclp_req *req)
  221. {
  222. if (req == &sclp_suspend_req || req == &sclp_init_req)
  223. return 1;
  224. if (sclp_suspend_state != sclp_suspend_state_running)
  225. return 0;
  226. if (sclp_init_state != sclp_init_state_initialized)
  227. return 0;
  228. if (sclp_activation_state != sclp_activation_state_active)
  229. return 0;
  230. return 1;
  231. }
  232. /* Queue a new request. Return zero on success, non-zero otherwise. */
  233. int
  234. sclp_add_request(struct sclp_req *req)
  235. {
  236. unsigned long flags;
  237. int rc;
  238. spin_lock_irqsave(&sclp_lock, flags);
  239. if (!__sclp_can_add_request(req)) {
  240. spin_unlock_irqrestore(&sclp_lock, flags);
  241. return -EIO;
  242. }
  243. req->status = SCLP_REQ_QUEUED;
  244. req->start_count = 0;
  245. list_add_tail(&req->list, &sclp_req_queue);
  246. rc = 0;
  247. /* Start if request is first in list */
  248. if (sclp_running_state == sclp_running_state_idle &&
  249. req->list.prev == &sclp_req_queue) {
  250. if (!req->sccb) {
  251. list_del(&req->list);
  252. rc = -ENODATA;
  253. goto out;
  254. }
  255. rc = __sclp_start_request(req);
  256. if (rc)
  257. list_del(&req->list);
  258. }
  259. out:
  260. spin_unlock_irqrestore(&sclp_lock, flags);
  261. return rc;
  262. }
  263. EXPORT_SYMBOL(sclp_add_request);
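/*
 * Illustrative sketch only (not part of this file): a typical caller prepares
 * a page-aligned SCCB, fills in a struct sclp_req as below and queues it; the
 * callback runs once the request has completed or was aborted. The command
 * word, SCCB buffer and callback names are placeholders; field names follow
 * their uses in this file.
 *
 *	static void my_req_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	req->command = MY_SCLP_CMDW;		(hypothetical command word)
 *	req->sccb = my_sccb;			(page-aligned SCCB buffer)
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_req_cb;
 *	req->callback_data = &my_completion;
 *	rc = sclp_add_request(req);
 */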
  264. /* Dispatch events found in request buffer to registered listeners. Return 0
  265. * if all events were dispatched, non-zero otherwise. */
  266. static int
  267. sclp_dispatch_evbufs(struct sccb_header *sccb)
  268. {
  269. unsigned long flags;
  270. struct evbuf_header *evbuf;
  271. struct list_head *l;
  272. struct sclp_register *reg;
  273. int offset;
  274. int rc;
  275. spin_lock_irqsave(&sclp_lock, flags);
  276. rc = 0;
  277. for (offset = sizeof(struct sccb_header); offset < sccb->length;
  278. offset += evbuf->length) {
  279. evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
  280. /* Check for malformed hardware response */
  281. if (evbuf->length == 0)
  282. break;
  283. /* Search for event handler */
  284. reg = NULL;
  285. list_for_each(l, &sclp_reg_list) {
  286. reg = list_entry(l, struct sclp_register, list);
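/* Event type n maps to the n-th most significant bit of the 32-bit mask
 * (type 1 = bit 31, the MSB). */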
  287. if (reg->receive_mask & (1 << (32 - evbuf->type)))
  288. break;
  289. else
  290. reg = NULL;
  291. }
  292. if (reg && reg->receiver_fn) {
  293. spin_unlock_irqrestore(&sclp_lock, flags);
  294. reg->receiver_fn(evbuf);
  295. spin_lock_irqsave(&sclp_lock, flags);
  296. } else if (reg == NULL)
  297. rc = -EOPNOTSUPP;
  298. }
  299. spin_unlock_irqrestore(&sclp_lock, flags);
  300. return rc;
  301. }
  302. /* Read event data request callback. */
  303. static void
  304. sclp_read_cb(struct sclp_req *req, void *data)
  305. {
  306. unsigned long flags;
  307. struct sccb_header *sccb;
  308. sccb = (struct sccb_header *) req->sccb;
  309. if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
  310. sccb->response_code == 0x220))
  311. sclp_dispatch_evbufs(sccb);
  312. spin_lock_irqsave(&sclp_lock, flags);
  313. sclp_reading_state = sclp_reading_state_idle;
  314. spin_unlock_irqrestore(&sclp_lock, flags);
  315. }
  316. /* Prepare read event data request. Called while sclp_lock is locked. */
  317. static void __sclp_make_read_req(void)
  318. {
  319. struct sccb_header *sccb;
  320. sccb = (struct sccb_header *) sclp_read_sccb;
  321. clear_page(sccb);
  322. memset(&sclp_read_req, 0, sizeof(struct sclp_req));
  323. sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
  324. sclp_read_req.status = SCLP_REQ_QUEUED;
  325. sclp_read_req.start_count = 0;
  326. sclp_read_req.callback = sclp_read_cb;
  327. sclp_read_req.sccb = sccb;
  328. sccb->length = PAGE_SIZE;
  329. sccb->function_code = 0;
  330. sccb->control_mask[2] = 0x80;
  331. }
  332. /* Search request list for request with matching sccb. Return request if found,
  333. * NULL otherwise. Called while sclp_lock is locked. */
  334. static inline struct sclp_req *
  335. __sclp_find_req(u32 sccb)
  336. {
  337. struct list_head *l;
  338. struct sclp_req *req;
  339. list_for_each(l, &sclp_req_queue) {
  340. req = list_entry(l, struct sclp_req, list);
  341. if (sccb == (u32) (addr_t) req->sccb)
  342. return req;
  343. }
  344. return NULL;
  345. }
  346. /* Handler for external interruption. Perform request post-processing.
  347. * Prepare read event data request if necessary. Start processing of next
  348. * request on queue. */
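/* Added note (editorial): the interruption parameter (param32) carries the
 * address of the finished SCCB in its upper bits (the low three bits are flag
 * bits, hence the 0xfffffff8 mask); the two low-order bits indicate that event
 * buffers are pending and still need to be read. */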
  349. static void sclp_interrupt_handler(struct ext_code ext_code,
  350. unsigned int param32, unsigned long param64)
  351. {
  352. struct sclp_req *req;
  353. u32 finished_sccb;
  354. u32 evbuf_pending;
  355. inc_irq_stat(IRQEXT_SCP);
  356. spin_lock(&sclp_lock);
  357. finished_sccb = param32 & 0xfffffff8;
  358. evbuf_pending = param32 & 0x3;
  359. if (finished_sccb) {
  360. del_timer(&sclp_request_timer);
  361. sclp_running_state = sclp_running_state_reset_pending;
  362. req = __sclp_find_req(finished_sccb);
  363. if (req) {
  364. /* Request post-processing */
  365. list_del(&req->list);
  366. req->status = SCLP_REQ_DONE;
  367. if (req->callback) {
  368. spin_unlock(&sclp_lock);
  369. req->callback(req, req->callback_data);
  370. spin_lock(&sclp_lock);
  371. }
  372. }
  373. sclp_running_state = sclp_running_state_idle;
  374. }
  375. if (evbuf_pending &&
  376. sclp_activation_state == sclp_activation_state_active)
  377. __sclp_queue_read_req();
  378. spin_unlock(&sclp_lock);
  379. sclp_process_queue();
  380. }
  381. /* Convert interval in jiffies to TOD ticks. */
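/* Added note (editorial, hedged): shifting whole seconds left by 32 bits only
 * approximates the TOD format. Bit 51 of the TOD clock equals one microsecond,
 * i.e. 4,096,000,000 units per second, so the result overestimates the
 * interval by roughly 5 percent, which is good enough for the coarse timeout
 * used in sclp_sync_wait(). */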
  382. static inline u64
  383. sclp_tod_from_jiffies(unsigned long jiffies)
  384. {
  385. return (u64) (jiffies / HZ) << 32;
  386. }
387. /* Wait until a currently running request has finished. Note: while this
388. * function is running, no timers are served on the calling CPU. */
  389. void
  390. sclp_sync_wait(void)
  391. {
  392. unsigned long long old_tick;
  393. unsigned long flags;
  394. unsigned long cr0, cr0_sync;
  395. u64 timeout;
  396. int irq_context;
  397. /* We'll be disabling timer interrupts, so we need a custom timeout
  398. * mechanism */
  399. timeout = 0;
  400. if (timer_pending(&sclp_request_timer)) {
  401. /* Get timeout TOD value */
  402. timeout = get_tod_clock() +
  403. sclp_tod_from_jiffies(sclp_request_timer.expires -
  404. jiffies);
  405. }
  406. local_irq_save(flags);
  407. /* Prevent bottom half from executing once we force interrupts open */
  408. irq_context = in_interrupt();
  409. if (!irq_context)
  410. local_bh_disable();
  411. /* Enable service-signal interruption, disable timer interrupts */
  412. old_tick = local_tick_disable();
  413. trace_hardirqs_on();
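/* Assumed CR0 layout (editorial note): the 0x00000200 bit is the
 * service-signal subclass mask; the AND mask below clears, among others, the
 * clock-comparator and CPU-timer subclass bits so that only the service
 * signal can interrupt the busy wait. */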
  414. __ctl_store(cr0, 0, 0);
  415. cr0_sync = cr0;
  416. cr0_sync &= 0xffff00a0;
  417. cr0_sync |= 0x00000200;
  418. __ctl_load(cr0_sync, 0, 0);
  419. __arch_local_irq_stosm(0x01);
  420. /* Loop until driver state indicates finished request */
  421. while (sclp_running_state != sclp_running_state_idle) {
  422. /* Check for expired request timer */
  423. if (timer_pending(&sclp_request_timer) &&
  424. get_tod_clock() > timeout &&
  425. del_timer(&sclp_request_timer))
  426. sclp_request_timer.function(sclp_request_timer.data);
  427. cpu_relax();
  428. }
  429. local_irq_disable();
  430. __ctl_load(cr0, 0, 0);
  431. if (!irq_context)
  432. _local_bh_enable();
  433. local_tick_enable(old_tick);
  434. local_irq_restore(flags);
  435. }
  436. EXPORT_SYMBOL(sclp_sync_wait);
  437. /* Dispatch changes in send and receive mask to registered listeners. */
  438. static void
  439. sclp_dispatch_state_change(void)
  440. {
  441. struct list_head *l;
  442. struct sclp_register *reg;
  443. unsigned long flags;
  444. sccb_mask_t receive_mask;
  445. sccb_mask_t send_mask;
  446. do {
  447. spin_lock_irqsave(&sclp_lock, flags);
  448. reg = NULL;
  449. list_for_each(l, &sclp_reg_list) {
  450. reg = list_entry(l, struct sclp_register, list);
  451. receive_mask = reg->send_mask & sclp_receive_mask;
  452. send_mask = reg->receive_mask & sclp_send_mask;
  453. if (reg->sclp_receive_mask != receive_mask ||
  454. reg->sclp_send_mask != send_mask) {
  455. reg->sclp_receive_mask = receive_mask;
  456. reg->sclp_send_mask = send_mask;
  457. break;
  458. } else
  459. reg = NULL;
  460. }
  461. spin_unlock_irqrestore(&sclp_lock, flags);
  462. if (reg && reg->state_change_fn)
  463. reg->state_change_fn(reg);
  464. } while (reg);
  465. }
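/* Layout of a state-change event buffer as delivered by the SCLP; packed to
 * match the hardware-defined format. */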
  466. struct sclp_statechangebuf {
  467. struct evbuf_header header;
  468. u8 validity_sclp_active_facility_mask : 1;
  469. u8 validity_sclp_receive_mask : 1;
  470. u8 validity_sclp_send_mask : 1;
  471. u8 validity_read_data_function_mask : 1;
  472. u16 _zeros : 12;
  473. u16 mask_length;
  474. u64 sclp_active_facility_mask;
  475. sccb_mask_t sclp_receive_mask;
  476. sccb_mask_t sclp_send_mask;
  477. u32 read_data_function_mask;
  478. } __attribute__((packed));
  479. /* State change event callback. Inform listeners of changes. */
  480. static void
  481. sclp_state_change_cb(struct evbuf_header *evbuf)
  482. {
  483. unsigned long flags;
  484. struct sclp_statechangebuf *scbuf;
  485. scbuf = (struct sclp_statechangebuf *) evbuf;
  486. if (scbuf->mask_length != sizeof(sccb_mask_t))
  487. return;
  488. spin_lock_irqsave(&sclp_lock, flags);
  489. if (scbuf->validity_sclp_receive_mask)
  490. sclp_receive_mask = scbuf->sclp_receive_mask;
  491. if (scbuf->validity_sclp_send_mask)
  492. sclp_send_mask = scbuf->sclp_send_mask;
  493. spin_unlock_irqrestore(&sclp_lock, flags);
  494. if (scbuf->validity_sclp_active_facility_mask)
  495. sclp_facilities = scbuf->sclp_active_facility_mask;
  496. sclp_dispatch_state_change();
  497. }
  498. static struct sclp_register sclp_state_change_event = {
  499. .receive_mask = EVTYP_STATECHANGE_MASK,
  500. .receiver_fn = sclp_state_change_cb
  501. };
  502. /* Calculate receive and send mask of currently registered listeners.
  503. * Called while sclp_lock is locked. */
  504. static inline void
  505. __sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
  506. {
  507. struct list_head *l;
  508. struct sclp_register *t;
  509. *receive_mask = 0;
  510. *send_mask = 0;
  511. list_for_each(l, &sclp_reg_list) {
  512. t = list_entry(l, struct sclp_register, list);
  513. *receive_mask |= t->receive_mask;
  514. *send_mask |= t->send_mask;
  515. }
  516. }
  517. /* Register event listener. Return 0 on success, non-zero otherwise. */
  518. int
  519. sclp_register(struct sclp_register *reg)
  520. {
  521. unsigned long flags;
  522. sccb_mask_t receive_mask;
  523. sccb_mask_t send_mask;
  524. int rc;
  525. rc = sclp_init();
  526. if (rc)
  527. return rc;
  528. spin_lock_irqsave(&sclp_lock, flags);
  529. /* Check event mask for collisions */
  530. __sclp_get_mask(&receive_mask, &send_mask);
  531. if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
  532. spin_unlock_irqrestore(&sclp_lock, flags);
  533. return -EBUSY;
  534. }
  535. /* Trigger initial state change callback */
  536. reg->sclp_receive_mask = 0;
  537. reg->sclp_send_mask = 0;
  538. reg->pm_event_posted = 0;
  539. list_add(&reg->list, &sclp_reg_list);
  540. spin_unlock_irqrestore(&sclp_lock, flags);
  541. rc = sclp_init_mask(1);
  542. if (rc) {
  543. spin_lock_irqsave(&sclp_lock, flags);
  544. list_del(&reg->list);
  545. spin_unlock_irqrestore(&sclp_lock, flags);
  546. }
  547. return rc;
  548. }
  549. EXPORT_SYMBOL(sclp_register);
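/*
 * Illustrative sketch only (not part of this file): an event driver registers
 * a listener much like sclp_state_change_event above. The mask and handler
 * names here are placeholders.
 *
 *	static struct sclp_register my_sclp_event = {
 *		.receive_mask = MY_EVTYP_MASK,
 *		.receiver_fn = my_receiver_fn,
 *		.state_change_fn = my_state_change_fn,
 *	};
 *
 *	rc = sclp_register(&my_sclp_event);
 *	...
 *	sclp_unregister(&my_sclp_event);
 */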
  550. /* Unregister event listener. */
  551. void
  552. sclp_unregister(struct sclp_register *reg)
  553. {
  554. unsigned long flags;
  555. spin_lock_irqsave(&sclp_lock, flags);
  556. list_del(&reg->list);
  557. spin_unlock_irqrestore(&sclp_lock, flags);
  558. sclp_init_mask(1);
  559. }
  560. EXPORT_SYMBOL(sclp_unregister);
  561. /* Remove event buffers which are marked processed. Return the number of
  562. * remaining event buffers. */
  563. int
  564. sclp_remove_processed(struct sccb_header *sccb)
  565. {
  566. struct evbuf_header *evbuf;
  567. int unprocessed;
  568. u16 remaining;
  569. evbuf = (struct evbuf_header *) (sccb + 1);
  570. unprocessed = 0;
  571. remaining = sccb->length - sizeof(struct sccb_header);
  572. while (remaining > 0) {
  573. remaining -= evbuf->length;
  574. if (evbuf->flags & 0x80) {
  575. sccb->length -= evbuf->length;
  576. memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
  577. remaining);
  578. } else {
  579. unprocessed++;
  580. evbuf = (struct evbuf_header *)
  581. ((addr_t) evbuf + evbuf->length);
  582. }
  583. }
  584. return unprocessed;
  585. }
  586. EXPORT_SYMBOL(sclp_remove_processed);
  587. /* Prepare init mask request. Called while sclp_lock is locked. */
  588. static inline void
  589. __sclp_make_init_req(u32 receive_mask, u32 send_mask)
  590. {
  591. struct init_sccb *sccb;
  592. sccb = (struct init_sccb *) sclp_init_sccb;
  593. clear_page(sccb);
  594. memset(&sclp_init_req, 0, sizeof(struct sclp_req));
  595. sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
  596. sclp_init_req.status = SCLP_REQ_FILLED;
  597. sclp_init_req.start_count = 0;
  598. sclp_init_req.callback = NULL;
  599. sclp_init_req.callback_data = NULL;
  600. sclp_init_req.sccb = sccb;
  601. sccb->header.length = sizeof(struct init_sccb);
  602. sccb->mask_length = sizeof(sccb_mask_t);
  603. sccb->receive_mask = receive_mask;
  604. sccb->send_mask = send_mask;
  605. sccb->sclp_receive_mask = 0;
  606. sccb->sclp_send_mask = 0;
  607. }
  608. /* Start init mask request. If calculate is non-zero, calculate the mask as
  609. * requested by registered listeners. Use zero mask otherwise. Return 0 on
  610. * success, non-zero otherwise. */
  611. static int
  612. sclp_init_mask(int calculate)
  613. {
  614. unsigned long flags;
  615. struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
  616. sccb_mask_t receive_mask;
  617. sccb_mask_t send_mask;
  618. int retry;
  619. int rc;
  620. unsigned long wait;
  621. spin_lock_irqsave(&sclp_lock, flags);
  622. /* Check if interface is in appropriate state */
  623. if (sclp_mask_state != sclp_mask_state_idle) {
  624. spin_unlock_irqrestore(&sclp_lock, flags);
  625. return -EBUSY;
  626. }
  627. if (sclp_activation_state == sclp_activation_state_inactive) {
  628. spin_unlock_irqrestore(&sclp_lock, flags);
  629. return -EINVAL;
  630. }
  631. sclp_mask_state = sclp_mask_state_initializing;
  632. /* Determine mask */
  633. if (calculate)
  634. __sclp_get_mask(&receive_mask, &send_mask);
  635. else {
  636. receive_mask = 0;
  637. send_mask = 0;
  638. }
  639. rc = -EIO;
  640. for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
  641. /* Prepare request */
  642. __sclp_make_init_req(receive_mask, send_mask);
  643. spin_unlock_irqrestore(&sclp_lock, flags);
  644. if (sclp_add_request(&sclp_init_req)) {
  645. /* Try again later */
  646. wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
  647. while (time_before(jiffies, wait))
  648. sclp_sync_wait();
  649. spin_lock_irqsave(&sclp_lock, flags);
  650. continue;
  651. }
  652. while (sclp_init_req.status != SCLP_REQ_DONE &&
  653. sclp_init_req.status != SCLP_REQ_FAILED)
  654. sclp_sync_wait();
  655. spin_lock_irqsave(&sclp_lock, flags);
  656. if (sclp_init_req.status == SCLP_REQ_DONE &&
  657. sccb->header.response_code == 0x20) {
  658. /* Successful request */
  659. if (calculate) {
  660. sclp_receive_mask = sccb->sclp_receive_mask;
  661. sclp_send_mask = sccb->sclp_send_mask;
  662. } else {
  663. sclp_receive_mask = 0;
  664. sclp_send_mask = 0;
  665. }
  666. spin_unlock_irqrestore(&sclp_lock, flags);
  667. sclp_dispatch_state_change();
  668. spin_lock_irqsave(&sclp_lock, flags);
  669. rc = 0;
  670. break;
  671. }
  672. }
  673. sclp_mask_state = sclp_mask_state_idle;
  674. spin_unlock_irqrestore(&sclp_lock, flags);
  675. return rc;
  676. }
  677. /* Deactivate SCLP interface. On success, new requests will be rejected,
  678. * events will no longer be dispatched. Return 0 on success, non-zero
  679. * otherwise. */
  680. int
  681. sclp_deactivate(void)
  682. {
  683. unsigned long flags;
  684. int rc;
  685. spin_lock_irqsave(&sclp_lock, flags);
  686. /* Deactivate can only be called when active */
  687. if (sclp_activation_state != sclp_activation_state_active) {
  688. spin_unlock_irqrestore(&sclp_lock, flags);
  689. return -EINVAL;
  690. }
  691. sclp_activation_state = sclp_activation_state_deactivating;
  692. spin_unlock_irqrestore(&sclp_lock, flags);
  693. rc = sclp_init_mask(0);
  694. spin_lock_irqsave(&sclp_lock, flags);
  695. if (rc == 0)
  696. sclp_activation_state = sclp_activation_state_inactive;
  697. else
  698. sclp_activation_state = sclp_activation_state_active;
  699. spin_unlock_irqrestore(&sclp_lock, flags);
  700. return rc;
  701. }
  702. EXPORT_SYMBOL(sclp_deactivate);
  703. /* Reactivate SCLP interface after sclp_deactivate. On success, new
  704. * requests will be accepted, events will be dispatched again. Return 0 on
  705. * success, non-zero otherwise. */
  706. int
  707. sclp_reactivate(void)
  708. {
  709. unsigned long flags;
  710. int rc;
  711. spin_lock_irqsave(&sclp_lock, flags);
  712. /* Reactivate can only be called when inactive */
  713. if (sclp_activation_state != sclp_activation_state_inactive) {
  714. spin_unlock_irqrestore(&sclp_lock, flags);
  715. return -EINVAL;
  716. }
  717. sclp_activation_state = sclp_activation_state_activating;
  718. spin_unlock_irqrestore(&sclp_lock, flags);
  719. rc = sclp_init_mask(1);
  720. spin_lock_irqsave(&sclp_lock, flags);
  721. if (rc == 0)
  722. sclp_activation_state = sclp_activation_state_active;
  723. else
  724. sclp_activation_state = sclp_activation_state_inactive;
  725. spin_unlock_irqrestore(&sclp_lock, flags);
  726. return rc;
  727. }
  728. EXPORT_SYMBOL(sclp_reactivate);
  729. /* Handler for external interruption used during initialization. Modify
  730. * request state to done. */
  731. static void sclp_check_handler(struct ext_code ext_code,
  732. unsigned int param32, unsigned long param64)
  733. {
  734. u32 finished_sccb;
  735. inc_irq_stat(IRQEXT_SCP);
  736. finished_sccb = param32 & 0xfffffff8;
  737. /* Is this the interrupt we are waiting for? */
  738. if (finished_sccb == 0)
  739. return;
  740. if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
  741. panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
  742. finished_sccb);
  743. spin_lock(&sclp_lock);
  744. if (sclp_running_state == sclp_running_state_running) {
  745. sclp_init_req.status = SCLP_REQ_DONE;
  746. sclp_running_state = sclp_running_state_idle;
  747. }
  748. spin_unlock(&sclp_lock);
  749. }
  750. /* Initial init mask request timed out. Modify request state to failed. */
  751. static void
  752. sclp_check_timeout(unsigned long data)
  753. {
  754. unsigned long flags;
  755. spin_lock_irqsave(&sclp_lock, flags);
  756. if (sclp_running_state == sclp_running_state_running) {
  757. sclp_init_req.status = SCLP_REQ_FAILED;
  758. sclp_running_state = sclp_running_state_idle;
  759. }
  760. spin_unlock_irqrestore(&sclp_lock, flags);
  761. }
  762. /* Perform a check of the SCLP interface. Return zero if the interface is
  763. * available and there are no pending requests from a previous instance.
  764. * Return non-zero otherwise. */
  765. static int
  766. sclp_check_interface(void)
  767. {
  768. struct init_sccb *sccb;
  769. unsigned long flags;
  770. int retry;
  771. int rc;
  772. spin_lock_irqsave(&sclp_lock, flags);
  773. /* Prepare init mask command */
  774. rc = register_external_interrupt(0x2401, sclp_check_handler);
  775. if (rc) {
  776. spin_unlock_irqrestore(&sclp_lock, flags);
  777. return rc;
  778. }
  779. for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
  780. __sclp_make_init_req(0, 0);
  781. sccb = (struct init_sccb *) sclp_init_req.sccb;
  782. rc = sclp_service_call(sclp_init_req.command, sccb);
  783. if (rc == -EIO)
  784. break;
  785. sclp_init_req.status = SCLP_REQ_RUNNING;
  786. sclp_running_state = sclp_running_state_running;
  787. __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
  788. sclp_check_timeout, 0);
  789. spin_unlock_irqrestore(&sclp_lock, flags);
  790. /* Enable service-signal interruption - needs to happen
  791. * with IRQs enabled. */
  792. service_subclass_irq_register();
  793. /* Wait for signal from interrupt or timeout */
  794. sclp_sync_wait();
  795. /* Disable service-signal interruption - needs to happen
  796. * with IRQs enabled. */
  797. service_subclass_irq_unregister();
  798. spin_lock_irqsave(&sclp_lock, flags);
  799. del_timer(&sclp_request_timer);
  800. if (sclp_init_req.status == SCLP_REQ_DONE &&
  801. sccb->header.response_code == 0x20) {
  802. rc = 0;
  803. break;
  804. } else
  805. rc = -EBUSY;
  806. }
  807. unregister_external_interrupt(0x2401, sclp_check_handler);
  808. spin_unlock_irqrestore(&sclp_lock, flags);
  809. return rc;
  810. }
  811. /* Reboot event handler. Reset send and receive mask to prevent pending SCLP
  812. * events from interfering with rebooted system. */
  813. static int
  814. sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
  815. {
  816. sclp_deactivate();
  817. return NOTIFY_DONE;
  818. }
  819. static struct notifier_block sclp_reboot_notifier = {
  820. .notifier_call = sclp_reboot_event
  821. };
  822. /*
  823. * Suspend/resume SCLP notifier implementation
  824. */
  825. static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
  826. {
  827. struct sclp_register *reg;
  828. unsigned long flags;
  829. if (!rollback) {
  830. spin_lock_irqsave(&sclp_lock, flags);
  831. list_for_each_entry(reg, &sclp_reg_list, list)
  832. reg->pm_event_posted = 0;
  833. spin_unlock_irqrestore(&sclp_lock, flags);
  834. }
  835. do {
  836. spin_lock_irqsave(&sclp_lock, flags);
  837. list_for_each_entry(reg, &sclp_reg_list, list) {
  838. if (rollback && reg->pm_event_posted)
  839. goto found;
  840. if (!rollback && !reg->pm_event_posted)
  841. goto found;
  842. }
  843. spin_unlock_irqrestore(&sclp_lock, flags);
  844. return;
  845. found:
  846. spin_unlock_irqrestore(&sclp_lock, flags);
  847. if (reg->pm_event_fn)
  848. reg->pm_event_fn(reg, sclp_pm_event);
  849. reg->pm_event_posted = rollback ? 0 : 1;
  850. } while (1);
  851. }
  852. /*
853. * Suspend/resume callbacks for platform device
  854. */
  855. static int sclp_freeze(struct device *dev)
  856. {
  857. unsigned long flags;
  858. int rc;
  859. sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
  860. spin_lock_irqsave(&sclp_lock, flags);
  861. sclp_suspend_state = sclp_suspend_state_suspended;
  862. spin_unlock_irqrestore(&sclp_lock, flags);
863. /* Init suspend data */
  864. memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
  865. sclp_suspend_req.callback = sclp_suspend_req_cb;
  866. sclp_suspend_req.status = SCLP_REQ_FILLED;
  867. init_completion(&sclp_request_queue_flushed);
  868. rc = sclp_add_request(&sclp_suspend_req);
  869. if (rc == 0)
  870. wait_for_completion(&sclp_request_queue_flushed);
  871. else if (rc != -ENODATA)
  872. goto fail_thaw;
  873. rc = sclp_deactivate();
  874. if (rc)
  875. goto fail_thaw;
  876. return 0;
  877. fail_thaw:
  878. spin_lock_irqsave(&sclp_lock, flags);
  879. sclp_suspend_state = sclp_suspend_state_running;
  880. spin_unlock_irqrestore(&sclp_lock, flags);
  881. sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
  882. return rc;
  883. }
  884. static int sclp_undo_suspend(enum sclp_pm_event event)
  885. {
  886. unsigned long flags;
  887. int rc;
  888. rc = sclp_reactivate();
  889. if (rc)
  890. return rc;
  891. spin_lock_irqsave(&sclp_lock, flags);
  892. sclp_suspend_state = sclp_suspend_state_running;
  893. spin_unlock_irqrestore(&sclp_lock, flags);
  894. sclp_pm_event(event, 0);
  895. return 0;
  896. }
  897. static int sclp_thaw(struct device *dev)
  898. {
  899. return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
  900. }
  901. static int sclp_restore(struct device *dev)
  902. {
  903. return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
  904. }
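/* Hibernation callbacks: freeze runs before the image is written, thaw after
 * image creation (or when it fails), restore after resuming from a
 * hibernation image. */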
  905. static const struct dev_pm_ops sclp_pm_ops = {
  906. .freeze = sclp_freeze,
  907. .thaw = sclp_thaw,
  908. .restore = sclp_restore,
  909. };
  910. static struct platform_driver sclp_pdrv = {
  911. .driver = {
  912. .name = "sclp",
  913. .owner = THIS_MODULE,
  914. .pm = &sclp_pm_ops,
  915. },
  916. };
  917. static struct platform_device *sclp_pdev;
  918. /* Initialize SCLP driver. Return zero if driver is operational, non-zero
  919. * otherwise. */
  920. static int
  921. sclp_init(void)
  922. {
  923. unsigned long flags;
  924. int rc = 0;
  925. spin_lock_irqsave(&sclp_lock, flags);
  926. /* Check for previous or running initialization */
  927. if (sclp_init_state != sclp_init_state_uninitialized)
  928. goto fail_unlock;
  929. sclp_init_state = sclp_init_state_initializing;
  930. /* Set up variables */
  931. INIT_LIST_HEAD(&sclp_req_queue);
  932. INIT_LIST_HEAD(&sclp_reg_list);
  933. list_add(&sclp_state_change_event.list, &sclp_reg_list);
  934. init_timer(&sclp_request_timer);
  935. /* Check interface */
  936. spin_unlock_irqrestore(&sclp_lock, flags);
  937. rc = sclp_check_interface();
  938. spin_lock_irqsave(&sclp_lock, flags);
  939. if (rc)
  940. goto fail_init_state_uninitialized;
  941. /* Register reboot handler */
  942. rc = register_reboot_notifier(&sclp_reboot_notifier);
  943. if (rc)
  944. goto fail_init_state_uninitialized;
  945. /* Register interrupt handler */
  946. rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
  947. if (rc)
  948. goto fail_unregister_reboot_notifier;
  949. sclp_init_state = sclp_init_state_initialized;
  950. spin_unlock_irqrestore(&sclp_lock, flags);
  951. /* Enable service-signal external interruption - needs to happen with
  952. * IRQs enabled. */
  953. service_subclass_irq_register();
  954. sclp_init_mask(1);
  955. return 0;
  956. fail_unregister_reboot_notifier:
  957. unregister_reboot_notifier(&sclp_reboot_notifier);
  958. fail_init_state_uninitialized:
  959. sclp_init_state = sclp_init_state_uninitialized;
  960. fail_unlock:
  961. spin_unlock_irqrestore(&sclp_lock, flags);
  962. return rc;
  963. }
  964. /*
  965. * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
  966. * to print the panic message.
  967. */
  968. static int sclp_panic_notify(struct notifier_block *self,
  969. unsigned long event, void *data)
  970. {
  971. if (sclp_suspend_state == sclp_suspend_state_suspended)
  972. sclp_undo_suspend(SCLP_PM_EVENT_THAW);
  973. return NOTIFY_OK;
  974. }
  975. static struct notifier_block sclp_on_panic_nb = {
  976. .notifier_call = sclp_panic_notify,
  977. .priority = SCLP_PANIC_PRIO,
  978. };
  979. static __init int sclp_initcall(void)
  980. {
  981. int rc;
  982. rc = platform_driver_register(&sclp_pdrv);
  983. if (rc)
  984. return rc;
  985. sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
  986. rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
  987. if (rc)
  988. goto fail_platform_driver_unregister;
  989. rc = atomic_notifier_chain_register(&panic_notifier_list,
  990. &sclp_on_panic_nb);
  991. if (rc)
  992. goto fail_platform_device_unregister;
  993. return sclp_init();
  994. fail_platform_device_unregister:
  995. platform_device_unregister(sclp_pdev);
  996. fail_platform_driver_unregister:
  997. platform_driver_unregister(&sclp_pdrv);
  998. return rc;
  999. }
  1000. arch_initcall(sclp_initcall);