|
@@ -36,12 +36,6 @@
|
|
|
#include <asm/mach/map.h>
|
|
|
#include "mm.h"
|
|
|
|
|
|
-/*
|
|
|
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
|
|
|
- * I/O regions in vm_struct->flags field.
|
|
|
- */
|
|
|
-#define VM_ARM_SECTION_MAPPING 0x80000000
|
|
|
-
|
|
|
int ioremap_page(unsigned long virt, unsigned long phys,
|
|
|
const struct mem_type *mtype)
|
|
|
{
|
|
@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
|
|
|
if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
|
|
|
return NULL;
|
|
|
|
|
|
- /*
|
|
|
- * Don't allow RAM to be mapped - this causes problems with ARMv6+
|
|
|
- */
|
|
|
- if (WARN_ON(pfn_valid(pfn)))
|
|
|
- return NULL;
|
|
|
-
|
|
|
type = get_mem_type(mtype);
|
|
|
if (!type)
|
|
|
return NULL;
|
|
@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
|
|
|
*/
|
|
|
size = PAGE_ALIGN(offset + size);
|
|
|
|
|
|
+ /*
|
|
|
+	 * Try to reuse one of the static mappings whenever possible.
|
|
|
+ */
|
|
|
+ read_lock(&vmlist_lock);
|
|
|
+ for (area = vmlist; area; area = area->next) {
|
|
|
+ if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
|
|
|
+ break;
|
|
|
+ if (!(area->flags & VM_ARM_STATIC_MAPPING))
|
|
|
+ continue;
|
|
|
+ if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
|
|
|
+ continue;
|
|
|
+ if (__phys_to_pfn(area->phys_addr) > pfn ||
|
|
|
+ __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
|
|
|
+ continue;
|
|
|
+ /* we can drop the lock here as we know *area is static */
|
|
|
+ read_unlock(&vmlist_lock);
|
|
|
+ addr = (unsigned long)area->addr;
|
|
|
+ addr += __pfn_to_phys(pfn) - area->phys_addr;
|
|
|
+ return (void __iomem *) (offset + addr);
|
|
|
+ }
|
|
|
+ read_unlock(&vmlist_lock);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Don't allow RAM to be mapped - this causes problems with ARMv6+
|
|
|
+ */
|
|
|
+ if (WARN_ON(pfn_valid(pfn)))
|
|
|
+ return NULL;
|
|
|
+
|
|
|
area = get_vm_area_caller(size, VM_IOREMAP, caller);
|
|
|
if (!area)
|
|
|
return NULL;
|
|
@@ -313,26 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
|
|
|
void __iounmap(volatile void __iomem *io_addr)
|
|
|
{
|
|
|
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
|
|
|
-#ifndef CONFIG_SMP
|
|
|
struct vm_struct *vm;
|
|
|
|
|
|
- /*
|
|
|
- * If this is a section based mapping we need to handle it
|
|
|
- * specially as the VM subsystem does not know how to handle
|
|
|
- * such a beast.
|
|
|
- */
|
|
|
read_lock(&vmlist_lock);
|
|
|
for (vm = vmlist; vm; vm = vm->next) {
|
|
|
- if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
|
|
|
- if (vm->flags & VM_ARM_SECTION_MAPPING) {
|
|
|
- unmap_area_sections((unsigned long)vm->addr,
|
|
|
- vm->size);
|
|
|
- }
|
|
|
+ if (vm->addr > addr)
|
|
|
break;
|
|
|
+ if (!(vm->flags & VM_IOREMAP))
|
|
|
+ continue;
|
|
|
+ /* If this is a static mapping we must leave it alone */
|
|
|
+ if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
|
|
|
+ (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
|
|
|
+ read_unlock(&vmlist_lock);
|
|
|
+ return;
|
|
|
}
|
|
|
+#ifndef CONFIG_SMP
|
|
|
+ /*
|
|
|
+ * If this is a section based mapping we need to handle it
|
|
|
+ * specially as the VM subsystem does not know how to handle
|
|
|
+ * such a beast.
|
|
|
+ */
|
|
|
+ if ((vm->addr == addr) &&
|
|
|
+ (vm->flags & VM_ARM_SECTION_MAPPING)) {
|
|
|
+ unmap_area_sections((unsigned long)vm->addr, vm->size);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+#endif
|
|
|
}
|
|
|
read_unlock(&vmlist_lock);
|
|
|
-#endif
|
|
|
|
|
|
vunmap(addr);
|
|
|
}
|