/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * thin interrupt support for qdio
 *
 * Copyright 2000-2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count;	/* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;

static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);

/* set once when SVS may be skipped; never cleared again (sticky flag) */
static int css_qdio_omit_svs;
/*
 * Clear the global summary indicator via a millicoded instruction
 * (emitted with .insn since older assemblers lack a mnemonic for it).
 * Function code 3 is passed in r1; r2 is a scratch output and r3 is
 * returned — presumably a time stamp of the operation (NOTE(review):
 * exact r3 semantics not visible here, confirm against the PoP).
 */
static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		" .insn rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}
  53. /* returns addr for the device state change indicator */
  54. static u32 *get_indicator(void)
  55. {
  56. int i;
  57. for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
  58. if (!atomic_read(&q_indicators[i].count)) {
  59. atomic_set(&q_indicators[i].count, 1);
  60. return &q_indicators[i].ind;
  61. }
  62. /* use the shared indicator */
  63. atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
  64. return &q_indicators[TIQDIO_SHARED_IND].ind;
  65. }
  66. static void put_indicator(u32 *addr)
  67. {
  68. int i;
  69. if (!addr)
  70. return;
  71. i = ((unsigned long)addr - (unsigned long)q_indicators) /
  72. sizeof(struct indicator_t);
  73. atomic_dec(&q_indicators[i].count);
  74. }
/*
 * Link all input queues of @irq_ptr into the global thin interrupt
 * queue list and set the device state change indicator so the tasklet
 * scans the freshly added queues on its next pass.
 */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);

	/* force an initial scan of the newly added queues */
	xchg(irq_ptr->dsci, 1);
}
  88. /*
  89. * we cannot stop the tiqdio tasklet here since it is for all
  90. * thinint qdio devices and it must run as long as there is a
  91. * thinint device left
  92. */
  93. void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
  94. {
  95. struct qdio_q *q;
  96. int i;
  97. for (i = 0; i < irq_ptr->nr_input_qs; i++) {
  98. q = irq_ptr->input_qs[i];
  99. /* if establish triggered an error */
  100. if (!q || !q->entry.prev || !q->entry.next)
  101. continue;
  102. mutex_lock(&tiq_list_lock);
  103. list_del_rcu(&q->entry);
  104. mutex_unlock(&tiq_list_lock);
  105. synchronize_rcu();
  106. }
  107. }
  108. static inline int tiqdio_inbound_q_done(struct qdio_q *q)
  109. {
  110. unsigned char state = 0;
  111. if (!atomic_read(&q->nr_buf_used))
  112. return 1;
  113. qdio_siga_sync_q(q);
  114. get_buf_state(q, q->first_to_check, &state, 0);
  115. if (state == SLSB_P_INPUT_PRIMED)
  116. /* more work coming */
  117. return 0;
  118. return 1;
  119. }
  120. static inline int shared_ind(struct qdio_irq *irq_ptr)
  121. {
  122. return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
  123. }
/*
 * Process one inbound thinint queue: sync with the adapter, check the
 * PCI capable outbound queues, then deliver any newly primed inbound
 * buffers to the handler.  The queue tasklet is rescheduled whenever
 * work remains, both before and after polling is stopped — resetting
 * the ACK state may expose new buffers, so done-ness is checked twice
 * to not lose the initiative.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		/* don't reschedule if the device is shutting down */
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
  152. void tiqdio_inbound_processing(unsigned long data)
  153. {
  154. struct qdio_q *q = (struct qdio_q *)data;
  155. __tiqdio_inbound_processing(q);
  156. }
/*
 * Check for work on all inbound thinint queues.  Every queue whose
 * device state change indicator is set gets its own tasklet scheduled;
 * the shared indicator is only cleared after the whole list has been
 * scanned, and the loop restarts while the adapter local summary
 * indicator keeps firing.
 */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:
	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {
			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing: re-arm if the adapter fired meanwhile */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 *
 * Runs in adapter interrupt context; all real work is deferred to the
 * global tiqdio tasklet.
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now, the tasklet will clean all dsci's
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}
  216. static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
  217. {
  218. struct scssc_area *scssc_area;
  219. int rc;
  220. scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
  221. memset(scssc_area, 0, PAGE_SIZE);
  222. if (reset) {
  223. scssc_area->summary_indicator_addr = 0;
  224. scssc_area->subchannel_indicator_addr = 0;
  225. } else {
  226. scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
  227. scssc_area->subchannel_indicator_addr =
  228. virt_to_phys(irq_ptr->dsci);
  229. }
  230. scssc_area->request = (struct chsc_header) {
  231. .length = 0x0fe0,
  232. .code = 0x0021,
  233. };
  234. scssc_area->operation_code = 0;
  235. scssc_area->ks = PAGE_DEFAULT_KEY;
  236. scssc_area->kc = PAGE_DEFAULT_KEY;
  237. scssc_area->isc = QDIO_AIRQ_ISC;
  238. scssc_area->schid = irq_ptr->schid;
  239. /* enable the time delay disablement facility */
  240. if (css_general_characteristics.aif_tdd)
  241. scssc_area->word_with_d_bit = 0x10000000;
  242. rc = chsc(scssc_area);
  243. if (rc)
  244. return -EIO;
  245. rc = chsc_error_from_response(scssc_area->response.code);
  246. if (rc) {
  247. DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
  248. scssc_area->response.code);
  249. DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
  250. return rc;
  251. }
  252. DBF_EVENT("setscind");
  253. DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
  254. DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
  255. return 0;
  256. }
  257. /* allocate non-shared indicators and shared indicator */
  258. int __init tiqdio_allocate_memory(void)
  259. {
  260. q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
  261. GFP_KERNEL);
  262. if (!q_indicators)
  263. return -ENOMEM;
  264. return 0;
  265. }
  266. void tiqdio_free_memory(void)
  267. {
  268. kfree(q_indicators);
  269. }
  270. int __init tiqdio_register_thinints(void)
  271. {
  272. isc_register(QDIO_AIRQ_ISC);
  273. tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
  274. NULL, QDIO_AIRQ_ISC);
  275. if (IS_ERR(tiqdio_alsi)) {
  276. DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
  277. tiqdio_alsi = NULL;
  278. isc_unregister(QDIO_AIRQ_ISC);
  279. return -ENOMEM;
  280. }
  281. return 0;
  282. }
  283. int qdio_establish_thinint(struct qdio_irq *irq_ptr)
  284. {
  285. if (!is_thinint_irq(irq_ptr))
  286. return 0;
  287. /* Check for aif time delay disablement. If installed,
  288. * omit SVS even under LPAR
  289. */
  290. if (css_general_characteristics.aif_tdd)
  291. css_qdio_omit_svs = 1;
  292. return set_subchannel_ind(irq_ptr, 0);
  293. }
  294. void qdio_setup_thinint(struct qdio_irq *irq_ptr)
  295. {
  296. if (!is_thinint_irq(irq_ptr))
  297. return;
  298. irq_ptr->dsci = get_indicator();
  299. DBF_HEX(&irq_ptr->dsci, sizeof(void *));
  300. }
  301. void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
  302. {
  303. if (!is_thinint_irq(irq_ptr))
  304. return;
  305. /* reset adapter interrupt indicators */
  306. put_indicator(irq_ptr->dsci);
  307. set_subchannel_ind(irq_ptr, 1);
  308. }
  309. void __exit tiqdio_unregister_thinints(void)
  310. {
  311. WARN_ON(!list_empty(&tiq_list));
  312. if (tiqdio_alsi) {
  313. s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
  314. isc_unregister(QDIO_AIRQ_ISC);
  315. }
  316. tasklet_kill(&tiqdio_tasklet);
  317. }