@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1303,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  *
  * Unmap all pages in the vma list.
  *
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
- * return the ending mmu_gather to the caller.
- *
  * Only addresses between `start' and `end' will be unmapped.
  *
  * The VMA list must be sorted in ascending virtual address order.
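
For reference, and not part of the patch itself: the deleted comment describes the old scheme in which the unmap path walked the range in ZAP_BLOCK_SIZE byte chunks so that locks were only held for one block's worth of work (8 pages with CONFIG_PREEMPT, 1024 pages otherwise). The standalone C sketch below illustrates that generic batching pattern only; the helper names, the lock stubs, and the user-space framing are assumptions for illustration, not the kernel's actual code.

/*
 * Illustration of block-wise processing: work on a range in fixed-size
 * blocks so that no single locked section exceeds one block.
 * ZAP_BLOCK_SIZE here mirrors the removed CONFIG_PREEMPT value; the
 * functions are hypothetical stand-ins.
 */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)   /* small blocks, as with CONFIG_PREEMPT */

static void zap_block(unsigned long start, unsigned long end)
{
	/* stand-in for unmapping one block while the lock is held */
	printf("zap [%#lx, %#lx)\n", start, end);
}

static void zap_range_in_blocks(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long block_end = start + ZAP_BLOCK_SIZE;

		if (block_end > end)
			block_end = end;

		/* lock(); */
		zap_block(start, block_end);
		/*
		 * unlock(); -- the lock is dropped between blocks, so the
		 * scheduler gets a chance to run (bounded latency), which is
		 * the rationale the removed comment gives for ZAP_BLOCK_SIZE.
		 */

		start = block_end;
	}
}

int main(void)
{
	zap_range_in_blocks(0x100000UL, 0x100000UL + 20 * PAGE_SIZE);
	return 0;
}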