@@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
+	 * such a beast.
	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
+			if (vm->flags & VM_ARM_SECTION_MAPPING) {
+				unmap_area_sections((unsigned long)vm->addr,
+						    vm->size);
 			}
 			break;
 		}
 	}
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
 #endif
 
 	vunmap(addr);
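
For reference, a rough sketch of how __iounmap() reads once this hunk is applied. It is not a verbatim copy of the file; the declarations of vmlist, vmlist_lock, VM_ARM_SECTION_MAPPING and unmap_area_sections() are assumed to come from arch/arm/mm/ioremap.c and the vmalloc headers:

/* Sketch of __iounmap() after the hunk above; surrounding declarations assumed. */
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.
	 */
	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
			if (vm->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)vm->addr,
						    vm->size);
			}
			break;
		}
	}
	read_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}

The new loop only walks and reads the vmlist rather than unlinking entries from it, which appears to be the rationale for taking vmlist_lock with read_lock() instead of write_lock().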