@@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	   !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	   !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
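
For context: the new paths above call find_static_vm_paddr() and find_static_vm_vaddr(), whose definitions are not part of this excerpt. Below is a minimal sketch of how they could be implemented, assuming the patch keeps the static mappings on a dedicated list (the static_vmlist name and the list_head member are assumptions here; the embedded vm member of struct static_vm is implied by the svm->vm.addr and svm->vm.phys_addr accesses above), kept sorted by ascending virtual address so the vaddr lookup can stop early.

#include <linux/list.h>
#include <linux/vmalloc.h>

/* Sketch only: one entry per static (boot-time) mapping. */
struct static_vm {
	struct vm_struct vm;
	struct list_head list;	/* assumed: sorted by ascending vm.addr */
};

static LIST_HEAD(static_vmlist);

/* Find a static mapping of type mtype covering [paddr, paddr + size). */
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		/* The requested range must lie entirely inside this one. */
		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

/* Find the static mapping, if any, that contains vaddr. */
static struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is assumed sorted; past vaddr, give up. */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

The point of the dedicated list is what lets both hunks above drop vmlist_lock entirely: static mappings are created during boot and never torn down, so lookups against them need no locking, and __iounmap() no longer has to walk the global vmlist just to decide whether an address is off-limits.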