uv_irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>		/* kmalloc_node()/kfree() for the rb tree nodes */
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};
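
/*
 * uv_irq_root holds one uv_irq_2_mmr_pnode per UV irq, keyed by irq number;
 * all lookups and updates of the tree are serialized by uv_irq_lock.
 */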
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root	uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);

static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}

static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}
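
/*
 * Most of the irq_chip callbacks are no-ops: masking and re-routing of a UV
 * irq are done entirely by (re)programming the hub MMR (see
 * arch_enable_uv_irq()/arch_disable_uv_irq() below), so the generic irq layer
 * only needs to EOI the local APIC and to forward affinity changes.
 */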
struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
			 uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int restrict)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (restrict == UV_AFFINITY_CPU)
		desc->status |= IRQ_NO_BALANCING;
	else
		desc->status |= IRQ_MOVE_PCNTXT;

	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
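
/*
 * irq_chip .set_affinity handler: rebuild the routing entry for the new
 * destination and rewrite the hub MMR that was recorded for this irq at
 * setup time.
 */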
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	int mmr_pnode;

	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int restrict)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 restrict);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The mmr_blade/mmr_offset pair recorded by uv_setup_irq() is looked up in
 * the rb tree, so only the irq number needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
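
/*
 * Illustration only (not part of the original file): a minimal sketch of how
 * a driver might consume this interface, loosely modeled on the way the SGI
 * xp/gru drivers pair uv_setup_irq()/uv_teardown_irq() with
 * request_irq()/free_irq(). The handler, the EXAMPLE_MMR_OFFSET macro and the
 * chosen cpu are hypothetical placeholders, and <linux/interrupt.h> would be
 * needed for request_irq()/free_irq().
 */
#if 0
static irqreturn_t example_uv_handler(int irq, void *dev_id)
{
	/* device-specific acknowledgment/handling would go here */
	return IRQ_HANDLED;
}

static int example_uv_attach(int cpu)
{
	int blade = uv_cpu_to_blade_id(cpu);	/* blade whose hub MMR is used */
	int irq, ret;

	/* Allocate an irq/vector and program the hub MMR to target @cpu. */
	irq = uv_setup_irq("example-uv", cpu, blade, EXAMPLE_MMR_OFFSET,
			   UV_AFFINITY_CPU);
	if (irq < 0)
		return irq;

	ret = request_irq(irq, example_uv_handler, 0, "example-uv", NULL);
	if (ret) {
		uv_teardown_irq(irq);	/* mask the MMR and free the irq */
		return ret;
	}
	return irq;
}

static void example_uv_detach(int irq)
{
	free_irq(irq, NULL);
	uv_teardown_irq(irq);
}
#endif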