@@ -60,8 +60,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/hw_irq.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_irq.h>
 #include <asm/apic.h>
@@ -140,20 +138,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
 	return pin;
 }
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
-struct irq_cfg {
-	struct irq_pin_list *irq_2_pin;
-	cpumask_var_t domain;
-	cpumask_var_t old_domain;
-	unsigned move_cleanup_count;
-	u8 vector;
-	u8 move_in_progress : 1;
-};
-
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg irq_cfgx[] = {
@@ -209,7 +193,7 @@ int __init arch_early_irq_init(void)
 }
 
 #ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	struct irq_cfg *cfg = NULL;
 	struct irq_desc *desc;
@@ -361,7 +345,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
 /* end for move_irq_desc */
 
 #else
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
@@ -555,23 +539,41 @@ static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
 	add_pin_to_irq_node(cfg, node, newapic, newpin);
 }
 
+static void __io_apic_modify_irq(struct irq_pin_list *entry,
+				 int mask_and, int mask_or,
+				 void (*final)(struct irq_pin_list *entry))
+{
+	unsigned int reg, pin;
+
+	pin = entry->pin;
+	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
+	reg &= mask_and;
+	reg |= mask_or;
+	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
+	if (final)
+		final(entry);
+}
+
 static void io_apic_modify_irq(struct irq_cfg *cfg,
 			       int mask_and, int mask_or,
 			       void (*final)(struct irq_pin_list *entry))
 {
-	int pin;
 	struct irq_pin_list *entry;
 
-	for_each_irq_pin(entry, cfg->irq_2_pin) {
-		unsigned int reg;
-		pin = entry->pin;
-		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
-		reg &= mask_and;
-		reg |= mask_or;
-		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
-		if (final)
-			final(entry);
-	}
+	for_each_irq_pin(entry, cfg->irq_2_pin)
+		__io_apic_modify_irq(entry, mask_and, mask_or, final);
+}
+
+static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
+{
+	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+			     IO_APIC_REDIR_MASKED, NULL);
+}
+
+static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
+{
+	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
+			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 
 static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
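The new __io_apic_modify_irq() helper exists so that a single redirection-table entry (RTE) can be rewritten without walking the whole irq_2_pin list; the per-entry EOI simulation added later in this patch depends on that. As a reminder of the register arithmetic (a sketch for illustration, not part of the patch): every RTE is 64 bits wide and is addressed as two 32-bit I/O APIC registers starting at index 0x10, so the low dword of pin N, which carries the mask and trigger-mode bits, sits at 0x10 + N * 2:

	/* Illustrative sketch only: mask delivery for a single pin by
	 * setting the mask bit in the low dword of its RTE. */
	static void example_mask_one_pin(int apic, int pin)
	{
		unsigned int reg = io_apic_read(apic, 0x10 + pin * 2);

		reg |= IO_APIC_REDIR_MASKED;
		io_apic_modify(apic, 0x10 + pin * 2, reg);
	}
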
@@ -595,18 +597,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 
-static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
-{
-	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
-			IO_APIC_REDIR_MASKED, NULL);
-}
-
-static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
-{
-	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
-			IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
-}
-
 static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
 	struct irq_cfg *cfg = desc->chip_data;
@@ -1177,7 +1167,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int cpu, err;
 	cpumask_var_t tmp_mask;
 
-	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+	if (cfg->move_in_progress)
 		return -EBUSY;
 
 	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
@@ -1237,8 +1227,7 @@ next:
 	return err;
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
@@ -1599,9 +1588,6 @@ __apicdebuginit(void) print_IO_APIC(void)
 	struct irq_desc *desc;
 	unsigned int irq;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
 	for (i = 0; i < nr_ioapics; i++)
 		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
@@ -1708,9 +1694,6 @@ __apicdebuginit(void) print_APIC_field(int base)
 {
 	int i;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG);
 
 	for (i = 0; i < 8; i++)
@@ -1724,9 +1707,6 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
 	unsigned int i, v, ver, maxlvt;
 	u64 icr;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
 		smp_processor_id(), hard_smp_processor_id());
 	v = apic_read(APIC_ID);
@@ -1824,13 +1804,19 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
 	printk("\n");
 }
 
-__apicdebuginit(void) print_all_local_APICs(void)
+__apicdebuginit(void) print_local_APICs(int maxcpu)
 {
 	int cpu;
 
+	if (!maxcpu)
+		return;
+
 	preempt_disable();
-	for_each_online_cpu(cpu)
+	for_each_online_cpu(cpu) {
+		if (cpu >= maxcpu)
+			break;
 		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+	}
 	preempt_enable();
 }
 
@@ -1839,7 +1825,7 @@ __apicdebuginit(void) print_PIC(void)
 	unsigned int v;
 	unsigned long flags;
 
-	if (apic_verbosity == APIC_QUIET || !nr_legacy_irqs)
+	if (!nr_legacy_irqs)
 		return;
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
@@ -1866,21 +1852,41 @@ __apicdebuginit(void) print_PIC(void)
 	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
 }
 
-__apicdebuginit(int) print_all_ICs(void)
+static int __initdata show_lapic = 1;
+static __init int setup_show_lapic(char *arg)
 {
+	int num = -1;
+
+	if (strcmp(arg, "all") == 0) {
+		show_lapic = CONFIG_NR_CPUS;
+	} else {
+		get_option(&arg, &num);
+		if (num >= 0)
+			show_lapic = num;
+	}
+
+	return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+__apicdebuginit(int) print_ICs(void)
+{
+	if (apic_verbosity == APIC_QUIET)
+		return 0;
+
 	print_PIC();
 
 	/* don't print out if apic is not there */
 	if (!cpu_has_apic && !apic_from_smp_config())
 		return 0;
 
-	print_all_local_APICs();
+	print_local_APICs(show_lapic);
 	print_IO_APIC();
 
 	return 0;
 }
 
-fs_initcall(print_all_ICs);
+fs_initcall(print_ICs);
 
 
 /* Where if anywhere is the i8259 connect in external int mode */
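With the apic_verbosity check centralized in print_ICs(), the new show_lapic= boot parameter only bounds how many local APICs get dumped. A usage sketch (assuming the usual apic=debug flag is also given, since print_ICs() still returns early when apic_verbosity is APIC_QUIET):

	apic=debug show_lapic=2     # dump the first two online CPUs only
	apic=debug show_lapic=all   # dump up to CONFIG_NR_CPUS CPUs
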
@@ -2031,7 +2037,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 	 * This is broken; anything with a real cpu count has to
 	 * circumvent this idiocy regardless.
 	 */
-	phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
+	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
 
 	/*
 	 * Set the IOAPIC ID to the value stored in the MPC table.
@@ -2058,7 +2064,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 		 * system must have a unique ID or we get lots of nice
 		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 		 */
-		if (apic->check_apicid_used(phys_id_present_map,
+		if (apic->check_apicid_used(&phys_id_present_map,
 					mp_ioapics[apic_id].apicid)) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 				apic_id, mp_ioapics[apic_id].apicid);
@@ -2073,7 +2079,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 			mp_ioapics[apic_id].apicid = i;
 		} else {
 			physid_mask_t tmp;
-			tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
+			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
 			apic_printk(APIC_VERBOSE, "Setting %d in the "
 					"phys_id_present_map\n",
 					mp_ioapics[apic_id].apicid);
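These three call sites, and the matching ones in io_apic_get_unique_id() further down, all make the same conversion: physid_mask_t values are now passed in and filled in by pointer rather than returned by value. A hedged sketch of the callback shapes the new call sites imply (the authoritative prototypes live in struct apic, not in this file):

	/* Sketch only, inferred from the call sites in this patch; a
	 * physid_mask_t is MAX_APICS bits wide, so passing it by value
	 * copies the whole bitmap on every call. */
	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
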
@@ -2228,20 +2234,16 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	 */
 
 #ifdef CONFIG_SMP
-static void send_cleanup_vector(struct irq_cfg *cfg)
+void send_cleanup_vector(struct irq_cfg *cfg)
 {
 	cpumask_var_t cleanup_mask;
 
 	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
 		unsigned int i;
-		cfg->move_cleanup_count = 0;
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-			cfg->move_cleanup_count++;
 		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
 			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
 	} else {
 		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
 		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 		free_cpumask_var(cleanup_mask);
 	}
@@ -2272,15 +2274,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
-
 /*
  * Either sets desc->affinity to a valid value, and returns
  * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
  * leaves desc->affinity untouched.
  */
-static unsigned int
+unsigned int
 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
@@ -2433,8 +2432,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 
 		cfg = irq_cfg(irq);
 		spin_lock(&desc->lock);
-		if (!cfg->move_cleanup_count)
-			goto unlock;
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@@ -2452,7 +2449,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 		}
 		__get_cpu_var(vector_irq)[vector] = -1;
-		cfg->move_cleanup_count--;
 unlock:
 		spin_unlock(&desc->lock);
 	}
@@ -2460,21 +2456,33 @@ unlock:
 	irq_exit();
 }
 
-static void irq_complete_move(struct irq_desc **descp)
+static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
 {
 	struct irq_desc *desc = *descp;
 	struct irq_cfg *cfg = desc->chip_data;
-	unsigned vector, me;
+	unsigned me;
 
 	if (likely(!cfg->move_in_progress))
 		return;
 
-	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
 
 	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
 }
+
+static void irq_complete_move(struct irq_desc **descp)
+{
+	__irq_complete_move(descp, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
+
+	__irq_complete_move(&desc, cfg->vector);
+}
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
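Dropping move_cleanup_count means nothing counts the cleanup IPIs in flight any more, so a path that cannot wait for them needs a way to retire a pending vector move synchronously; that is what irq_force_complete_move() provides. A hedged sketch of how a caller, per the cpu-offline reasoning added to the ack_apic_level() comment below, presumably uses it (illustrative; the real caller sits outside this file):

	/* Sketch: while breaking irq affinity on a CPU that is going
	 * offline, force the tail end of any in-flight vector move
	 * instead of waiting for the cleanup IPIs to be accounted. */
	if (cfg->move_in_progress)
		irq_force_complete_move(irq);
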
@@ -2490,6 +2498,59 @@ static void ack_apic_edge(unsigned int irq)
 
 atomic_t irq_mis_count;
 
+/*
+ * IO-APIC versions below 0x20 don't support the EOI register.
+ * For the record, here is the information about various versions:
+ *     0Xh     82489DX
+ *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
+ *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
+ *     30h-FFh Reserved
+ *
+ * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
+ * version as 0x2. This is a documentation error; these ICH chips
+ * use io-apics of version 0x20.
+ *
+ * For IO-APICs with an EOI register, we use it to do an explicit EOI.
+ * Otherwise, we simulate the EOI message manually by changing the trigger
+ * mode to edge and then back to level, with the RTE masked meanwhile.
+ */
+static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+{
+	struct irq_pin_list *entry;
+
+	for_each_irq_pin(entry, cfg->irq_2_pin) {
+		if (mp_ioapics[entry->apic].apicver >= 0x20) {
+			/*
+			 * Intr-remapping uses the pin number as the virtual
+			 * vector in the RTE. The actual vector is programmed
+			 * in the intr-remapping table entry. Hence for the
+			 * io-apic EOI we use the pin number.
+			 */
+			if (irq_remapped(irq))
+				io_apic_eoi(entry->apic, entry->pin);
+			else
+				io_apic_eoi(entry->apic, cfg->vector);
+		} else {
+			__mask_and_edge_IO_APIC_irq(entry);
+			__unmask_and_level_IO_APIC_irq(entry);
+		}
+	}
+}
+
+static void eoi_ioapic_irq(struct irq_desc *desc)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	unsigned int irq;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	__eoi_ioapic_irq(irq, cfg);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
 static void ack_apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
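The 0x20 cutoff above comes from the I/O APIC version register: only versions 0x20 and up implement the EOI register that io_apic_eoi() writes. For reference, a minimal sketch of where a value like mp_ioapics[].apicver originates (illustrative only; the real probe code lives elsewhere in this file): the version is the low byte of I/O APIC register 1:

	/* Sketch: fetch the version byte from I/O APIC register 1. */
	union IO_APIC_reg_01 reg_01;

	reg_01.raw = io_apic_read(apic, 1);
	if (reg_01.bits.version >= 0x20)
		/* this chip has an EOI register */;
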
@@ -2525,6 +2586,19 @@ static void ack_apic_level(unsigned int irq)
 	 * level-triggered interrupt. We mask the source for the time of the
 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 	 * The idea is from Manfred Spraul. --macro
+	 *
+	 * Also, when a cpu goes offline, fixup_irqs() will forward any
+	 * unhandled interrupt on the offlined cpu to the new cpu
+	 * destination that is handling the corresponding interrupt. This
+	 * interrupt forwarding is done via IPIs. Hence, in this case too,
+	 * a level-triggered io-apic interrupt will be seen as an edge
+	 * interrupt in the IRR. And we can't rely on the cpu's EOI
+	 * being broadcast to the IO-APICs to clear the remote IRR
+	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
+	 * supporting the EOI register, we do an explicit EOI to clear the
+	 * remote IRR, and on IO-APICs which don't have an EOI register,
+	 * we use the above logic (mask+edge followed by unmask+level) from
+	 * Manfred Spraul to clear the remote IRR.
 	 */
 	cfg = desc->chip_data;
 	i = cfg->vector;
@@ -2536,6 +2610,19 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	/*
+	 * Tail end of clearing the remote IRR bit (either by delivering the
+	 * EOI message via an io-apic EOI register write or by simulating it
+	 * using mask+edge followed by unmask+level logic) done manually when
+	 * the level-triggered interrupt is seen as an edge-triggered
+	 * interrupt at the cpu.
+	 */
+	if (!(v & (1 << (i & 0x1f)))) {
+		atomic_inc(&irq_mis_count);
+
+		eoi_ioapic_irq(desc);
+	}
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
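For a concrete sense of the bit test above: v holds a 32-bit slice of the local APIC Trigger Mode Register (TMR), read earlier in ack_apic_level(), and i is the vector, so i & 0x1f picks the bit inside that slice. A worked sketch with an assumed vector of 0x31 (illustrative values only):

	/* Sketch: TMR lookup for vector 0x31. The TMR is an array of
	 * 32-bit registers, one per 32 vectors, at 0x10-byte stride. */
	unsigned int i = 0x31;
	unsigned int v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
	int level = v & (1 << (i & 0x1f));	/* bit 0x11 of that slice */
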
@@ -2569,41 +2656,9 @@ static void ack_apic_level(unsigned int irq)
 		move_masked_irq(irq);
 		unmask_IO_APIC_irq_desc(desc);
 	}
-
-	/* Tail end of version 0x11 I/O APIC bug workaround */
-	if (!(v & (1 << (i & 0x1f)))) {
-		atomic_inc(&irq_mis_count);
-		spin_lock(&ioapic_lock);
-		__mask_and_edge_IO_APIC_irq(cfg);
-		__unmask_and_level_IO_APIC_irq(cfg);
-		spin_unlock(&ioapic_lock);
-	}
 }
 
 #ifdef CONFIG_INTR_REMAP
-static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
-{
-	struct irq_pin_list *entry;
-
-	for_each_irq_pin(entry, cfg->irq_2_pin)
-		io_apic_eoi(entry->apic, entry->pin);
-}
-
-static void
-eoi_ioapic_irq(struct irq_desc *desc)
-{
-	struct irq_cfg *cfg;
-	unsigned long flags;
-	unsigned int irq;
-
-	irq = desc->irq;
-	cfg = desc->chip_data;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__eoi_ioapic_irq(irq, cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
 static void ir_ack_apic_edge(unsigned int irq)
 {
 	ack_APIC_irq();
@@ -3157,6 +3212,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 			continue;
 
 		desc_new = move_irq_desc(desc_new, node);
+		cfg_new = desc_new->chip_data;
 
 		if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
 			irq = new;
@@ -3708,75 +3764,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
-#ifdef CONFIG_X86_UV
-/*
- * Re-target the irq to the specified CPU and enable the specified MMR located
- * on the specified blade to allow the sending of MSIs to the specified CPU.
- */
-int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
-		       unsigned long mmr_offset)
-{
-	const struct cpumask *eligible_cpu = cpumask_of(cpu);
-	struct irq_cfg *cfg;
-	int mmr_pnode;
-	unsigned long mmr_value;
-	struct uv_IO_APIC_route_entry *entry;
-	unsigned long flags;
-	int err;
-
-	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-	cfg = irq_cfg(irq);
-
-	err = assign_irq_vector(irq, cfg, eligible_cpu);
-	if (err != 0)
-		return err;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
-				      irq_name);
-	spin_unlock_irqrestore(&vector_lock, flags);
-
-	mmr_value = 0;
-	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-	entry->vector = cfg->vector;
-	entry->delivery_mode = apic->irq_delivery_mode;
-	entry->dest_mode = apic->irq_dest_mode;
-	entry->polarity = 0;
-	entry->trigger = 0;
-	entry->mask = 0;
-	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
-
-	mmr_pnode = uv_blade_to_pnode(mmr_blade);
-	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
-
-	return irq;
-}
-
-/*
- * Disable the specified MMR located on the specified blade so that MSIs are
- * no longer allowed to be sent.
- */
-void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
-{
-	unsigned long mmr_value;
-	struct uv_IO_APIC_route_entry *entry;
-	int mmr_pnode;
-
-	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-	mmr_value = 0;
-	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-	entry->mask = 1;
-
-	mmr_pnode = uv_blade_to_pnode(mmr_blade);
-	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-}
-#endif /* CONFIG_X86_64 */
-
 int __init io_apic_get_redir_entries (int ioapic)
 {
 	union IO_APIC_reg_01 reg_01;
@@ -3944,7 +3931,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	 */
 
 	if (physids_empty(apic_id_map))
-		apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
+		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(ioapic, 0);
@@ -3960,10 +3947,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	 * Every APIC in a system must have a unique ID or we get lots of nice
 	 * 'stuck on smp_invalidate_needed IPI wait' messages.
 	 */
-	if (apic->check_apicid_used(apic_id_map, apic_id)) {
+	if (apic->check_apicid_used(&apic_id_map, apic_id)) {
 
 		for (i = 0; i < get_physical_broadcast(); i++) {
-			if (!apic->check_apicid_used(apic_id_map, i))
+			if (!apic->check_apicid_used(&apic_id_map, i))
 				break;
 		}
@@ -3976,7 +3963,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 		apic_id = i;
 	}
 
-	tmp = apic->apicid_to_cpu_present(apic_id);
+	apic->apicid_to_cpu_present(apic_id, &tmp);
 	physids_or(apic_id_map, apic_id_map, tmp);
 
 	if (reg_00.bits.ID != apic_id) {
@@ -4106,7 +4093,7 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics)
 	for (i = 0; i < nr_ioapics; i++) {
 		res[i].name = mem;
 		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		sprintf(mem, "IOAPIC %u", i);
+		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
 	}
 
@@ -4140,18 +4127,17 @@ void __init ioapic_init_mappings(void)
 #ifdef CONFIG_X86_32
 fake_ioapic_page:
 #endif
-			ioapic_phys = (unsigned long)
-				alloc_bootmem_pages(PAGE_SIZE);
+			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
 			ioapic_phys = __pa(ioapic_phys);
 		}
 		set_fixmap_nocache(idx, ioapic_phys);
-		apic_printk(APIC_VERBOSE,
-			    "mapped IOAPIC to %08lx (%08lx)\n",
-			    __fix_to_virt(idx), ioapic_phys);
+		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
+			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
+			ioapic_phys);
 		idx++;
 
 		ioapic_res->start = ioapic_phys;
-		ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
 		ioapic_res++;
 	}
 }