@@ -76,7 +76,6 @@ EXPORT_SYMBOL(acpi_in_debugger);
 extern char line_buf[80];
 #endif /*ENABLE_DEBUGGER */
 
-static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
@@ -105,11 +104,11 @@ struct acpi_ioremap {
 	void __iomem *virt;
 	acpi_physical_address phys;
 	acpi_size size;
-	struct kref ref;
+	unsigned long refcount;
 };
 
 static LIST_HEAD(acpi_ioremaps);
-static DEFINE_SPINLOCK(acpi_ioremap_lock);
+static DEFINE_MUTEX(acpi_ioremap_lock);
 
 static void __init acpi_osi_setup_late(void);
 
@@ -285,6 +284,22 @@ acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
 	return NULL;
 }
 
+void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
+{
+	struct acpi_ioremap *map;
+	void __iomem *virt = NULL;
+
+	mutex_lock(&acpi_ioremap_lock);
+	map = acpi_map_lookup(phys, size);
+	if (map) {
+		virt = map->virt + (phys - map->phys);
+		map->refcount++;
+	}
+	mutex_unlock(&acpi_ioremap_lock);
+	return virt;
+}
+EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
+
 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
 static struct acpi_ioremap *
 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
@@ -302,8 +317,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-	struct acpi_ioremap *map, *tmp_map;
-	unsigned long flags;
+	struct acpi_ioremap *map;
 	void __iomem *virt;
 	acpi_physical_address pg_off;
 	acpi_size pg_sz;
@@ -316,14 +330,25 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 	if (!acpi_gbl_permanent_mmap)
 		return __acpi_map_table((unsigned long)phys, size);
 
+	mutex_lock(&acpi_ioremap_lock);
+	/* Check if there's a suitable mapping already. */
+	map = acpi_map_lookup(phys, size);
+	if (map) {
+		map->refcount++;
+		goto out;
+	}
+
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
-	if (!map)
+	if (!map) {
+		mutex_unlock(&acpi_ioremap_lock);
 		return NULL;
+	}
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
 	virt = acpi_os_ioremap(pg_off, pg_sz);
 	if (!virt) {
+		mutex_unlock(&acpi_ioremap_lock);
 		kfree(map);
 		return NULL;
 	}
@@ -332,62 +357,51 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 	map->virt = virt;
 	map->phys = pg_off;
 	map->size = pg_sz;
-	kref_init(&map->ref);
-
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
-	/* Check if page has already been mapped. */
-	tmp_map = acpi_map_lookup(phys, size);
-	if (tmp_map) {
-		kref_get(&tmp_map->ref);
-		spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-		iounmap(map->virt);
-		kfree(map);
-		return tmp_map->virt + (phys - tmp_map->phys);
-	}
+	map->refcount = 1;
+
 	list_add_tail_rcu(&map->list, &acpi_ioremaps);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
 
+ out:
+	mutex_unlock(&acpi_ioremap_lock);
 	return map->virt + (phys - map->phys);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-static void acpi_kref_del_iomap(struct kref *ref)
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
 {
-	struct acpi_ioremap *map;
+	if (!--map->refcount)
+		list_del_rcu(&map->list);
+}
 
-	map = container_of(ref, struct acpi_ioremap, ref);
-	list_del_rcu(&map->list);
+static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+{
+	if (!map->refcount) {
+		synchronize_rcu();
+		iounmap(map->virt);
+		kfree(map);
+	}
 }
 
 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
 	struct acpi_ioremap *map;
-	unsigned long flags;
-	int del;
 
 	if (!acpi_gbl_permanent_mmap) {
 		__acpi_unmap_table(virt, size);
 		return;
 	}
 
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
+	mutex_lock(&acpi_ioremap_lock);
 	map = acpi_map_lookup_virt(virt, size);
 	if (!map) {
-		spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-		printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
-		dump_stack();
+		mutex_unlock(&acpi_ioremap_lock);
+		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
 		return;
 	}
+	acpi_os_drop_map_ref(map);
+	mutex_unlock(&acpi_ioremap_lock);
 
-	del = kref_put(&map->ref, acpi_kref_del_iomap);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
-
-	if (!del)
-		return;
-
-	synchronize_rcu();
-	iounmap(map->virt);
-	kfree(map);
+	acpi_os_map_cleanup(map);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
@@ -397,7 +411,7 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 		__acpi_unmap_table(virt, size);
 }
 
-int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
 {
 	void __iomem *virt;
 
@@ -413,13 +427,10 @@ int acpi_os_map_generic_address(struct acpi_generic_address *addr)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
 
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
 {
-	void __iomem *virt;
-	unsigned long flags;
-	acpi_size size = addr->bit_width / 8;
+	struct acpi_ioremap *map;
 
 	if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		return;
@@ -427,13 +438,17 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
 	if (!addr->address || !addr->bit_width)
 		return;
 
-	spin_lock_irqsave(&acpi_ioremap_lock, flags);
-	virt = acpi_map_vaddr_lookup(addr->address, size);
-	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+	mutex_lock(&acpi_ioremap_lock);
+	map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+	if (!map) {
+		mutex_unlock(&acpi_ioremap_lock);
+		return;
+	}
+	acpi_os_drop_map_ref(map);
+	mutex_unlock(&acpi_ioremap_lock);
 
-	acpi_os_unmap_memory(virt, size);
+	acpi_os_map_cleanup(map);
 }
-EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
@@ -516,11 +531,15 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
 	acpi_irq_stats_init();
 
 	/*
-	 * Ignore the GSI from the core, and use the value in our copy of the
-	 * FADT. It may not be the same if an interrupt source override exists
-	 * for the SCI.
+	 * ACPI interrupts different from the SCI in our copy of the FADT are
+	 * not supported.
 	 */
-	gsi = acpi_gbl_FADT.sci_interrupt;
+	if (gsi != acpi_gbl_FADT.sci_interrupt)
+		return AE_BAD_PARAMETER;
+
+	if (acpi_irq_handler)
+		return AE_ALREADY_ACQUIRED;
+
 	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
 		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
 		       gsi);
@@ -531,20 +550,20 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
 	acpi_irq_context = context;
 	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
+		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
 	}
-	acpi_irq_irq = irq;
 
 	return AE_OK;
 }
 
 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 {
-	if (irq) {
-		free_irq(irq, acpi_irq);
-		acpi_irq_handler = NULL;
-		acpi_irq_irq = 0;
-	}
+	if (irq != acpi_gbl_FADT.sci_interrupt)
+		return AE_BAD_PARAMETER;
+
+	free_irq(irq, acpi_irq);
+	acpi_irq_handler = NULL;
 
 	return AE_OK;
 }
@@ -1603,7 +1622,7 @@ acpi_status __init acpi_os_initialize1(void)
 acpi_status acpi_os_terminate(void)
 {
 	if (acpi_irq_handler) {
-		acpi_os_remove_interrupt_handler(acpi_irq_irq,
+		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
 						 acpi_irq_handler);
 	}
 