@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  * tear-down from @mm. The @fullmm argument is used when @mm is without
  * users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = -1UL;
-	tlb->end = 0;
+	tlb->start = start;
+	tlb->end = end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
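
The "(start | (end+1))" test above is worth spelling out: a full
address-space teardown passes start == 0 and end == -1 (all bits set),
so end+1 wraps to 0 and the OR of both terms is zero for exactly that
pair; any bounded range leaves at least one bit set. A minimal
userspace sketch of the same check, assuming the 0/-1 convention the
fullmm callers use:

	#include <assert.h>

	static int is_fullmm(unsigned long start, unsigned long end)
	{
		/* Is it from 0 to ~0? */
		return !(start | (end + 1));
	}

	int main(void)
	{
		assert(is_fullmm(0, ~0UL));		/* exit/execve teardown */
		assert(!is_fullmm(0, 0x7fffe000UL));	/* bounded unmap from 0 */
		assert(!is_fullmm(0x1000UL, ~0UL));	/* bounded unmap up to the top */
		return 0;
	}
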
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1205,17 +1203,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
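
The force_flush dance above is the heart of the fix: flush only the
segment that has actually been walked, then put the remainder back so
a later flush still covers it. A small userspace model of that
bookkeeping (hypothetical struct and names, standing in for struct
mmu_gather and tlb_flush_mmu()):

	#include <stdio.h>

	/* stand-in for struct mmu_gather: just the pending flush range */
	struct gather {
		unsigned long start, end;
	};

	static void flush_mmu(struct gather *g)
	{
		printf("flush [%#lx, %#lx)\n", g->start, g->end);
	}

	static void zap_range(struct gather *g, unsigned long addr, unsigned long end)
	{
		int force_flush = 0, batched = 0;

	again:
		do {
			addr += 0x1000;		/* "zap" one page */
			if (++batched == 4) {	/* pretend the batch filled up */
				force_flush = 1;
				break;
			}
		} while (addr != end);

		if (force_flush) {
			unsigned long old_end;

			force_flush = 0;
			batched = 0;

			/* flush just the segment walked so far ... */
			old_end = g->end;
			g->end = addr;
			flush_mmu(g);

			/* ... then make the pending range the remainder */
			g->start = addr;
			g->end = old_end;
			if (addr != end)
				goto again;
		}
	}

	int main(void)
	{
		struct gather g = { 0x10000, 0x20000 };

		zap_range(&g, g.start, g.end);
		flush_mmu(&g);	/* final flush, as tlb_finish_mmu() would do */
		return 0;
	}
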
@@ -1400,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1426,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
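
For reference, the calling convention after this change: ranged
unmappers pass the real bounds to tlb_gather_mmu(), while full
address-space teardown passes the 0/-1 pair that it now recognizes as
fullmm. Schematically (an illustrative fragment, not a further hunk of
this patch):

	/* ranged unmap, as in zap_page_range() above */
	tlb_gather_mmu(&tlb, mm, start, end);
	...
	tlb_finish_mmu(&tlb, start, end);

	/* full address-space teardown, as in exit_mmap() */
	tlb_gather_mmu(&tlb, mm, 0, -1);
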