uv_irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
        struct rb_node          list;
        unsigned long           offset;
        int                     pnode;
        int                     irq;
};

static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);

static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
        return 0;
}

static void uv_ack_apic(unsigned int irq)
{
        ack_APIC_irq();
}

static struct irq_chip uv_irq_chip = {
        .name           = "UV-CORE",
        .startup        = uv_noop_ret,
        .shutdown       = uv_noop,
        .enable         = uv_noop,
        .disable        = uv_noop,
        .ack            = uv_noop,
        .mask           = uv_noop,
        .unmask         = uv_noop,
        .eoi            = uv_ack_apic,
        .end            = uv_noop,
        .set_affinity   = uv_set_irq_affinity,
};
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
        struct rb_node **link = &uv_irq_root.rb_node;
        struct rb_node *parent = NULL;
        struct uv_irq_2_mmr_pnode *n;
        struct uv_irq_2_mmr_pnode *e;
        unsigned long irqflags;

        n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
                         uv_blade_to_memory_nid(blade));
        if (!n)
                return -ENOMEM;

        n->irq = irq;
        n->offset = offset;
        n->pnode = uv_blade_to_pnode(blade);
        spin_lock_irqsave(&uv_irq_lock, irqflags);
        /* Find the right place in the rbtree: */
        while (*link) {
                parent = *link;
                e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

                if (unlikely(irq == e->irq)) {
                        /* irq entry exists */
                        e->pnode = uv_blade_to_pnode(blade);
                        e->offset = offset;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        kfree(n);
                        return 0;
                }

                if (irq < e->irq)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /* Insert the node into the rbtree. */
        rb_link_node(&n->list, parent, link);
        rb_insert_color(&n->list, &uv_irq_root);

        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

                if (e->irq == irq) {
                        *offset = e->offset;
                        *pnode = e->pnode;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        return 0;
                }

                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return -1;
}
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                   unsigned long mmr_offset, int limit)
{
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        int mmr_pnode;
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int err;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                     sizeof(unsigned long));

        cfg = irq_cfg(irq);

        err = assign_irq_vector(irq, cfg, eligible_cpu);
        if (err != 0)
                return err;

        if (limit == UV_AFFINITY_CPU)
                desc->status |= IRQ_NO_BALANCING;
        else
                desc->status |= IRQ_MOVE_PCNTXT;

        set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
                                      irq_name);

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = apic->cpu_mask_to_apicid(eligible_cpu);

        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                     sizeof(unsigned long));

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg = desc->chip_data;
        unsigned int dest;
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        unsigned long mmr_offset;
        int mmr_pnode;

        if (set_desc_affinity(desc, mask, &dest))
                return -1;

        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = dest;

        /* Get previously stored MMR and pnode of hub sourcing interrupts */
        if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
                return -1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return 0;
}
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int limit)
{
        int irq, ret;

        irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

        if (irq <= 0)
                return -EBUSY;

        ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
                                 limit);
        if (ret == irq)
                uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
        else
                destroy_irq(irq);

        return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
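
/*
 * Hypothetical usage sketch (not part of this file): a caller would
 * typically allocate the UV irq with uv_setup_irq() and then install a
 * handler with request_irq().  The handler, device name, cpu, mmr_blade
 * and mmr_offset values below are illustrative assumptions only; the
 * calls themselves (request_irq, uv_teardown_irq, UV_AFFINITY_CPU) are
 * the real interfaces used by this code.
 *
 *      static irqreturn_t my_uv_handler(int irq, void *arg)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      int my_irq = uv_setup_irq("my-uv-dev", cpu, mmr_blade,
 *                                mmr_offset, UV_AFFINITY_CPU);
 *      if (my_irq <= 0)
 *              return -ENXIO;
 *      if (request_irq(my_irq, my_uv_handler, 0, "my-uv-dev", NULL)) {
 *              uv_teardown_irq(my_irq);
 *              return -ENXIO;
 *      }
 */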
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree, so only the irq itself needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

                if (e->irq == irq) {
                        arch_disable_uv_irq(e->pnode, e->offset);
                        rb_erase(n, &uv_irq_root);
                        kfree(e);
                        break;
                }

                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
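
/*
 * Hypothetical teardown sketch (not part of this file), continuing the
 * setup example above: the handler registered with request_irq() would
 * be released before the UV irq itself is torn down.  The my_irq name is
 * an illustrative assumption.
 *
 *      free_irq(my_irq, NULL);
 *      uv_teardown_irq(my_irq);
 */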