@@ -62,8 +62,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		unsigned long size, unsigned long prot_val, void *caller)
 {
-	unsigned long pfn, offset, vaddr;
-	resource_size_t last_addr;
+	unsigned long offset, vaddr;
+	resource_size_t pfn, last_pfn, last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
@@ -100,10 +100,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	for (pfn = phys_addr >> PAGE_SHIFT;
-			(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
-			pfn++) {
-
+	last_pfn = last_addr >> PAGE_SHIFT;
+	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
 		int is_ram = page_is_ram(pfn);
 
 		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
@@ -115,7 +113,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
+	phys_addr &= PHYSICAL_PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
@@ -613,7 +611,7 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
 		return;
 	}
 	offset = virt_addr & ~PAGE_MASK;
-	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
 
 	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
 	while (nrpages > 0) {
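
The first three hunks matter on 32-bit PAE builds, where physical addresses
(resource_size_t) are 64 bits wide but "unsigned long" is only 32: keeping a
page frame number in an unsigned long and rebuilding an address with
"pfn << PAGE_SHIFT", or masking a 64-bit address with the 32-bit PAGE_MASK,
silently drops the address bits above 4 GiB. A minimal stand-alone sketch of
that truncation (illustrative only: the typedefs and both mask definitions
below are stand-ins for the kernel's, assuming a build where unsigned long is
32 bits and resource_size_t is 64 bits):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t resource_size_t;	/* 64-bit physical address */
	typedef uint32_t ulong32;		/* stand-in for 32-bit "unsigned long" */

	#define PAGE_SHIFT		12
	#define PAGE_MASK		((ulong32)~0u << PAGE_SHIFT)		/* 0xfffff000, 32-bit */
	#define PHYSICAL_PAGE_MASK	(~(resource_size_t)0 << PAGE_SHIFT)	/* 64-bit wide */

	int main(void)
	{
		resource_size_t phys_addr = 0x100002000ULL;	/* 4 GiB + 8 KiB */
		ulong32 pfn = (ulong32)(phys_addr >> PAGE_SHIFT);

		/* Old loop bound: the shift wraps in 32 bits, yielding 0x2000. */
		printf("pfn << PAGE_SHIFT              = 0x%x\n", pfn << PAGE_SHIFT);

		/* Old alignment: the 32-bit mask zero-extends and clears the
		 * upper half of the address, also yielding 0x2000. */
		printf("phys_addr & PAGE_MASK          = 0x%llx\n",
		       (unsigned long long)(phys_addr & PAGE_MASK));

		/* New alignment: a mask as wide as the address keeps the high
		 * bits, yielding 0x100002000. */
		printf("phys_addr & PHYSICAL_PAGE_MASK = 0x%llx\n",
		       (unsigned long long)(phys_addr & PHYSICAL_PAGE_MASK));
		return 0;
	}

The rewritten RAM-check loop sidesteps the shift altogether by comparing frame
numbers (pfn <= last_pfn) instead of reconstructed addresses. The last hunk is
a separate fencepost fix in early_iounmap(): with offset == 0 and
size == PAGE_SIZE + 1 the mapping spans two pages, yet the old
PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT evaluates to 1, leaving the final
page mapped; PAGE_ALIGN(offset + size) >> PAGE_SHIFT correctly gives 2.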