@@ -9,10 +9,22 @@
  */
 
 #include <linux/module.h>
+#include <linux/rbtree.h>
 #include <linux/irq.h>
 
 #include <asm/apic.h>
 #include <asm/uv/uv_irq.h>
+#include <asm/uv/uv_hub.h>
+
+/* MMR offset and pnode of hub sourcing interrupts for a given irq */
+struct uv_irq_2_mmr_pnode {
+	struct rb_node		list;
+	unsigned long		offset;
+	int			pnode;
+	int			irq;
+};
+static DEFINE_SPINLOCK(uv_irq_lock);
+static struct rb_root		uv_irq_root;
 
 static void uv_noop(unsigned int irq)
 {
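
The rb tree introduced above is keyed directly by irq number, with the rb_node
embedded in the entry itself: rb_entry() is container_of(), so a pointer to the
embedded 'list' node recovers the whole entry, and the key (irq) lives in the
entry rather than in the tree. A minimal sketch of that pattern, kernel context
assumed; to_mmr_pnode() is a hypothetical helper, not part of the patch:

static struct uv_irq_2_mmr_pnode *to_mmr_pnode(struct rb_node *node)
{
	/* rb_entry(ptr, type, member) == container_of(ptr, type, member) */
	return rb_entry(node, struct uv_irq_2_mmr_pnode, list);
}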

@@ -39,25 +51,106 @@ struct irq_chip uv_irq_chip = {
 	.unmask		= uv_noop,
 	.eoi		= uv_ack_apic,
 	.end		= uv_noop,
+	.set_affinity	= uv_set_irq_affinity,
 };
 
+/*
+ * Add the offset and pnode information of the hub sourcing interrupts
+ * for a given irq to the rb tree.
+ */
+static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
+{
+	struct rb_node **link = &uv_irq_root.rb_node;
+	struct rb_node *parent = NULL;
+	struct uv_irq_2_mmr_pnode *n;
+	struct uv_irq_2_mmr_pnode *e;
+	unsigned long irqflags;
+
+	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
+			 uv_blade_to_memory_nid(blade));
+	if (!n)
+		return -ENOMEM;
+
+	n->irq = irq;
+	n->offset = offset;
+	n->pnode = uv_blade_to_pnode(blade);
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	/* Find the right place in the rbtree: */
+	while (*link) {
+		parent = *link;
+		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
+
+		if (unlikely(irq == e->irq)) {
+			/* irq entry exists; update it in place */
+			e->pnode = uv_blade_to_pnode(blade);
+			e->offset = offset;
+			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+			kfree(n);
+			return 0;
+		}
+
+		if (irq < e->irq)
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+
+	/* Insert the node into the rbtree. */
+	rb_link_node(&n->list, parent, link);
+	rb_insert_color(&n->list, &uv_irq_root);
+
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+	return 0;
+}
+
+/* Retrieve offset and pnode information from the rb tree for a specific irq */
+int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
+{
+	struct uv_irq_2_mmr_pnode *e;
+	struct rb_node *n;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	n = uv_irq_root.rb_node;
+	while (n) {
+		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+
+		if (e->irq == irq) {
+			*offset = e->offset;
+			*pnode = e->pnode;
+			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+			return 0;
+		}
+
+		if (irq < e->irq)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
+	return -1;
+}
+
 /*
  * Set up a mapping of an available irq and vector, and enable the specified
  * MMR that defines the MSI that is to be sent to the specified CPU when an
  * interrupt is raised.
  */
 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
-		 unsigned long mmr_offset)
+		 unsigned long mmr_offset, int restrict)
 {
-	int irq;
-	int ret;
+	int irq, ret;
+
+	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
 
-	irq = create_irq();
 	if (irq <= 0)
 		return -EBUSY;
 
-	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
-	if (ret != irq)
+	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
+				 restrict);
+	if (ret == irq)
+		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
+	else
 		destroy_irq(irq);
 
 	return ret;
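
For illustration, a hedged sketch of a caller under the new signature. The
function name, the "example_uv" string, and EXAMPLE_MMR_OFFSET are invented
placeholders, and the meaning of the final 'restrict' argument is defined by
arch_enable_uv_irq(), not by this sketch:

#define EXAMPLE_MMR_OFFSET	0x0UL	/* placeholder, not a real MMR */

static int example_request_uv_irq(int cpu, int blade)
{
	/* On success, uv_setup_irq() returns the newly created irq number. */
	int irq = uv_setup_irq("example_uv", cpu, blade,
			       EXAMPLE_MMR_OFFSET, 0);

	return irq;	/* negative (e.g. -EBUSY) on failure */
}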

@@ -71,9 +164,27 @@ EXPORT_SYMBOL_GPL(uv_setup_irq);
  *
- * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
  */
-void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
+void uv_teardown_irq(unsigned int irq)
 {
-	arch_disable_uv_irq(mmr_blade, mmr_offset);
+	struct uv_irq_2_mmr_pnode *e;
+	struct rb_node *n;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&uv_irq_lock, irqflags);
+	n = uv_irq_root.rb_node;
+	while (n) {
+		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+		if (e->irq == irq) {
+			arch_disable_uv_irq(e->pnode, e->offset);
+			rb_erase(n, &uv_irq_root);
+			kfree(e);
+			break;
+		}
+		if (irq < e->irq)
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
 	destroy_irq(irq);
 }
 EXPORT_SYMBOL_GPL(uv_teardown_irq);
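
And a matching teardown sketch: since the rb tree now records the pnode and
MMR offset per irq, the caller passes back only the irq it got from
uv_setup_irq(). example_release_uv_irq() is hypothetical; uv_irq_2_mmr_info()
and uv_teardown_irq() are the functions added/changed above:

static void example_release_uv_irq(int irq)
{
	unsigned long offset;
	int pnode;

	/* Optional: query the recorded mapping before it is erased. */
	if (uv_irq_2_mmr_info(irq, &offset, &pnode) == 0)
		pr_info("irq %d: pnode %d, mmr offset 0x%lx\n",
			irq, pnode, offset);

	/* Disables the MMR, erases the rb entry, and frees the irq. */
	uv_teardown_irq(irq);
}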