@@ -16,25 +16,49 @@
 
 void __iomem *ioremap(unsigned long paddr, unsigned long size)
 {
-	unsigned long vaddr;
-	struct vm_struct *area;
-	unsigned long off, end;
-	const pgprot_t prot = PAGE_KERNEL_NO_CACHE;
+	unsigned long end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, nothing special needed */
+	/* If the region is h/w uncached, avoid MMU mappings */
 	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
 		return (void __iomem *)paddr;
 
+	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache semantics wise it is the same as ioremap - "forced" uncached.
+ * However unlike vanilla ioremap, which bypasses the ARC MMU for addresses
+ * in the ARC hardware uncached region, this one still goes through the MMU,
+ * as the caller might need finer access control (R/W/X).
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+			   unsigned long flags)
+{
+	void __iomem *vaddr;
+	struct vm_struct *area;
+	unsigned long off, end;
+	pgprot_t prot = __pgprot(flags);
+
+	/* Don't allow wraparound, zero size */
+	end = paddr + size - 1;
+	if ((!size) || (end < paddr))
+		return NULL;
+
 	/* An early platform driver might end up here */
 	if (!slab_is_available())
 		return NULL;
 
-	/* Mappings have to be page-aligned, page-sized */
+	/* force uncached */
+	prot = pgprot_noncached(prot);
+
+	/* Mappings have to be page-aligned */
 	off = paddr & ~PAGE_MASK;
 	paddr &= PAGE_MASK;
 	size = PAGE_ALIGN(end + 1) - paddr;
@@ -45,17 +69,17 @@ void __iomem *ioremap(unsigned long paddr, unsigned long size)
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
-
 	area->phys_addr = paddr;
-	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
-		vfree(area->addr);
+	vaddr = (void __iomem *)area->addr;
+	if (ioremap_page_range((unsigned long)vaddr,
+			       (unsigned long)vaddr + size, paddr, prot)) {
+		vunmap((void __force *)vaddr);
 		return NULL;
 	}
-
 	return (void __iomem *)(off + (char __iomem *)vaddr);
 }
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
+
 
 void iounmap(const void __iomem *addr)
 {
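
A driver built on top of this patch might call the new API as sketched
below. This is an illustrative example, not part of the patch; the
my_dev_* names and the base/size constants are made-up placeholders.

	#include <linux/io.h>
	#include <linux/mm.h>

	#define MY_DEV_REGS_BASE	0x20000000UL	/* placeholder phys addr */
	#define MY_DEV_REGS_SIZE	0x1000UL	/* placeholder size */

	static void __iomem *my_dev_map(void)
	{
		/*
		 * Request specific protection bits; ioremap_prot() still
		 * forces the mapping uncached via pgprot_noncached(), but
		 * unlike ioremap() it always goes through the MMU, even
		 * for addresses in the ARC hardware uncached region.
		 */
		return ioremap_prot(MY_DEV_REGS_BASE, MY_DEV_REGS_SIZE,
				    pgprot_val(PAGE_KERNEL));
	}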