@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/kmod.h>
@@ -321,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 	return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn) page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn) 0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn)) {
+		if (pg_sz > PAGE_SIZE)
+			return NULL;
+		return (void __iomem __force *)kmap(pfn_to_page(pfn));
+	} else
+		return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn))
+		kunmap(pfn_to_page(pfn));
+	else
+		iounmap(vaddr);
+}
+
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
@@ -353,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = acpi_os_ioremap(pg_off, pg_sz);
+	virt = acpi_map(pg_off, pg_sz);
 	if (!virt) {
 		mutex_unlock(&acpi_ioremap_lock);
 		kfree(map);
@@ -384,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
 {
 	if (!map->refcount) {
 		synchronize_rcu();
-		iounmap(map->virt);
+		acpi_unmap(map->phys, map->virt);
 		kfree(map);
 	}
 }
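
For context, a minimal usage sketch (not part of the patch): it shows how a caller reaches the new acpi_map()/acpi_unmap() paths through the existing OSL entry points acpi_os_map_memory()/acpi_os_unmap_memory(), and why a RAM-backed region must now fit within one rounded-up page, since it is mapped with kmap() rather than ioremap(). The helper name example_read_table_header() is made up for illustration; the OSL functions and memcpy_fromio() are existing kernel APIs.

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Illustrative sketch only: copy an ACPI table header out of a mapped
 * physical region.  With this patch, a RAM-backed address is mapped via
 * kmap(), so acpi_map() rejects regions larger than PAGE_SIZE and
 * acpi_os_map_memory() returns NULL in that case. */
static int example_read_table_header(acpi_physical_address phys,
				     struct acpi_table_header *hdr)
{
	void __iomem *virt;

	virt = acpi_os_map_memory(phys, sizeof(*hdr));
	if (!virt)
		return -ENOMEM;

	memcpy_fromio(hdr, virt, sizeof(*hdr));

	acpi_os_unmap_memory(virt, sizeof(*hdr));
	return 0;
}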