@@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+				     unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		unmap_im_area_pte(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+				     unsigned long end)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		unmap_im_area_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+	struct mm_struct *mm = &ioremap_mm;
+	unsigned long start = addr, next;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset_i(addr);
+	flush_cache_vunmap(addr, end);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		unmap_im_area_pud(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range(start, end);
+
+	spin_unlock(&mm->page_table_lock);
+}
+
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	unsigned long vsid;
@@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	if (mem_init_done) {
 		spin_lock(&ioremap_mm.page_table_lock);
 		pgdp = pgd_offset_i(ea);
-		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+		if (!ptep)
+			return -ENOMEM;
 		pa = abs_to_phys(pa);
 		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 						      __pgprot(flags)));
@@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 			panic("map_io_page: could not insert mapping");
 		}
 	}
+	return 0;
 }
 
 
@@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 		flags |= pgprot_val(PAGE_KERNEL);
 
 	for (i = 0; i < size; i += PAGE_SIZE)
-		map_io_page(ea+i, pa+i, flags);
+		if (map_io_page(ea+i, pa+i, flags))
+			goto failure;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ failure:
+	if (mem_init_done)
+		unmap_im_area(ea, ea + size);
+	return NULL;
 }
 
 
@@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
 {
 	unsigned long pa, ea;
+	void __iomem *ret;
 
 	/*
 	 * Choose an address to map it to.
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
 		if (area == NULL)
 			return NULL;
 		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
 	} else {
 		ea = ioremap_bot;
-		ioremap_bot += size;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
 	}
-
-	return __ioremap_com(addr, pa, ea, size, flags);
+	return ret;
 }
 
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
@@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
+	void __iomem *ret;
 
 	/* For now, require page-aligned values for pa, ea, and size */
 	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		}
 	}
 
-	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
 		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 		return 1;
 	}
@@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 	return 0;
 }
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
-				  unsigned long size)
-{
-	unsigned long base, end;
-	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
-	do {
-		pte_t page;
-		page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
-		       " table\n");
-	} while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
-				  unsigned long size)
-{
-	unsigned long base, end;
-	pmd_t *pmd;
-
-	if (pgd_none(*dir))
-		return;
-	if (pgd_bad(*dir)) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
-		return;
-	}
-
-	pmd = pmd_offset(dir, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
-	do {
-		unmap_im_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, start, end, size;
-	struct mm_struct *mm;
-	pgd_t *dir;
+	unsigned long address, size;
 	void *addr;
 
-	if (!mem_init_done) {
+	if (!mem_init_done)
 		return;
-	}
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 
-	if ((size = im_free(addr)) == 0) {
+	if ((size = im_free(addr)) == 0)
 		return;
-	}
 
 	address = (unsigned long)addr;
-	start = address;
-	end = address + size;
-
-	mm = &ioremap_mm;
-	spin_lock(&mm->page_table_lock);
-
-	dir = pgd_offset_i(address);
-	flush_cache_vunmap(address, end);
-	do {
-		unmap_im_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-	return;
+	unmap_im_area(address, address + size);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)