@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
  * which requires the new ioremap'd region to be referenced, the CPU will
  * reference the _old_ region.
  *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-                  unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+        unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
         const struct mem_type *type;
         int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
          */
         size = PAGE_ALIGN(offset + size);
 
-        area = get_vm_area(size, VM_IOREMAP);
+        area = get_vm_area_caller(size, VM_IOREMAP, caller);
         if (!area)
                 return NULL;
         addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
         flush_cache_vmap(addr, addr + size);
         return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+        unsigned int mtype, void *caller)
 {
         unsigned long last_addr;
         unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
         if (!size || last_addr < phys_addr)
                 return NULL;
 
-        return __arm_ioremap_pfn(pfn, offset, size, mtype);
+        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+                        caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+                  unsigned int mtype)
+{
+        return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+                        __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+        return __arm_ioremap_caller(phys_addr, size, mtype,
+                        __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
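
For reference, a minimal userspace sketch (not part of the patch) of the caller-tracking pattern the diff introduces: the real work moves into a *_caller() variant that takes an explicit caller cookie, while the existing entry points become one-line wrappers that pass __builtin_return_address(0). The demo_map()/demo_map_caller() names and the printf() below are invented for illustration only; in the patch the cookie is handed to get_vm_area_caller() so the vm_struct backing each ioremap records the real call site.

#include <stdio.h>
#include <stddef.h>

/* Internal worker: takes an explicit "caller" cookie, mirroring __arm_ioremap_pfn_caller(). */
static void *demo_map_caller(unsigned long phys, size_t size, void *caller)
{
        printf("map %#lx (+%zu bytes), requested from %p\n", phys, size, caller);
        return NULL;            /* the mapping itself is out of scope for this sketch */
}

/* Public wrapper: keeps the old signature, mirroring __arm_ioremap(). */
void *demo_map(unsigned long phys, size_t size)
{
        /* The return address of this frame is the original call site in the caller. */
        return demo_map_caller(phys, size, __builtin_return_address(0));
}

int main(void)
{
        demo_map(0x10000000UL, 4096);
        return 0;
}

Because the wrapper adds exactly one stack frame, __builtin_return_address(0) evaluated inside it is an address in the caller of the unchanged API, so existing users need no modification while new code can pass its own cookie through the *_caller() form.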