@@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 #endif
 
-void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
-{
-	/*
-	 * Is this a new hole at the lowest possible address?
-	 */
-	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
-		mm->free_area_cache = addr;
-}
-
 /*
  * This mmap-allocator allocates new areas top-down from below the
  * stack's low limit (the base):
@@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 }
 #endif
 
-void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
-{
-	/*
-	 * Is this a new hole at the highest possible address?
-	 */
-	if (addr > mm->free_area_cache)
-		mm->free_area_cache = addr;
-
-	/* dont allow allocations above current base */
-	if (mm->free_area_cache > mm->mmap_base)
-		mm->free_area_cache = mm->mmap_base;
-}
-
 unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
@@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct vm_area_struct **insertion_point;
 	struct vm_area_struct *tail_vma = NULL;
-	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
@@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else
 		mm->highest_vm_end = prev ? prev->vm_end : 0;
 	tail_vma->vm_next = NULL;
-	if (mm->unmap_area == arch_unmap_area)
-		addr = prev ? prev->vm_end : mm->mmap_base;
-	else
-		addr = vma ? vma->vm_start : mm->mmap_base;
-	mm->unmap_area(mm, addr);
 	mm->mmap_cache = NULL;		/* Kill the cache. */
 }
 
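For readers without the full file at hand: the two helpers deleted above maintained mm->free_area_cache, a per-mm hint for where the next unmapped-area search should start, which these hunks retire. Below is a minimal standalone sketch of that heuristic for illustration only; the struct mm_stub, the TASK_UNMAPPED_BASE_STUB value, and the function names are placeholders invented for the sketch (the conditionals are copied from the removed kernel functions), not the kernel's definitions.

#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's mm_struct, reduced to the two
 * fields the removed helpers touched. This stub is an assumption made
 * for the sketch, not the real structure.
 */
struct mm_stub {
	unsigned long free_area_cache;	/* hint: where the next search starts */
	unsigned long mmap_base;	/* base of the mmap area */
};

/* Placeholder for the arch-defined TASK_UNMAPPED_BASE. */
#define TASK_UNMAPPED_BASE_STUB	0x40000000UL

/* Bottom-up layout: a new hole below the cached hint lowers the hint,
 * so the next bottom-up search can start from the freed range. */
static void unmap_area_bottomup(struct mm_stub *mm, unsigned long addr)
{
	if (addr >= TASK_UNMAPPED_BASE_STUB && addr < mm->free_area_cache)
		mm->free_area_cache = addr;
}

/* Top-down layout: a new hole above the hint raises it, clamped so the
 * next search never starts above mmap_base. */
static void unmap_area_topdown(struct mm_stub *mm, unsigned long addr)
{
	if (addr > mm->free_area_cache)
		mm->free_area_cache = addr;
	if (mm->free_area_cache > mm->mmap_base)
		mm->free_area_cache = mm->mmap_base;
}

int main(void)
{
	struct mm_stub mm = {
		.free_area_cache = 0x7f0000000000UL,
		.mmap_base	 = 0x7f0000000000UL,
	};

	unmap_area_bottomup(&mm, 0x50000000UL);		/* lowers the hint */
	unmap_area_topdown(&mm, 0x7f8000000000UL);	/* raised, then clamped */
	printf("hint = %#lx\n", mm.free_area_cache);	/* prints 0x7f0000000000 */
	return 0;
}

The call site removed from detach_vmas_to_be_unmapped() fed these helpers the start of the hole just opened (prev->vm_end for the bottom-up case, vma->vm_start for top-down); with the helpers gone there is no per-mm hint left to refresh on unmap, which is why the addr local and the mm->unmap_area() call disappear as well.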