@@ -912,12 +912,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				long *zap_work, struct zap_details *details)
 {
 	struct mm_struct *mm = tlb->mm;
+	int force_flush = 0;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int rss[NR_MM_COUNTERS];
 
 	init_rss_vec(rss);
-
+again:
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
 	do {
@@ -974,7 +975,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			tlb_remove_page(tlb, page);
+			force_flush = !__tlb_remove_page(tlb, page);
+			if (force_flush)
+				break;
 			continue;
 		}
 		/*
@@ -1001,6 +1004,18 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
+	/*
+	 * mmu_gather ran out of room to batch pages, we break out of
+	 * the PTE lock to avoid doing the potentially expensive TLB
+	 * invalidate and page-free while holding it.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu(tlb);
+		if (addr != end)
+			goto again;
+	}
+
 	return addr;
 }
 
@@ -1121,17 +1136,14 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather **tlbp,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
 {
 	long zap_work = ZAP_BLOCK_SIZE;
-	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
-	int tlb_start_valid = 0;
 	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
-	int fullmm = (*tlbp)->fullmm;
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1152,11 +1164,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 			untrack_pfn_vma(vma, 0, 0);
 
 		while (start != end) {
-			if (!tlb_start_valid) {
-				tlb_start = start;
-				tlb_start_valid = 1;
-			}
-
 			if (unlikely(is_vm_hugetlb_page(vma))) {
 				/*
 				 * It is undesirable to test vma->vm_file as it
@@ -1177,7 +1184,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 
 				start = end;
 			} else
-				start = unmap_page_range(*tlbp, vma,
+				start = unmap_page_range(tlb, vma,
 						start, end, &zap_work, details);
 
 			if (zap_work > 0) {
@@ -1185,19 +1192,13 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 				break;
 			}
 
-			tlb_finish_mmu(*tlbp, tlb_start, start);
-
 			if (need_resched() ||
 				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
-				if (i_mmap_lock) {
-					*tlbp = NULL;
+				if (i_mmap_lock)
 					goto out;
-				}
 				cond_resched();
 			}
 
-			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
-			tlb_start_valid = 0;
 			zap_work = ZAP_BLOCK_SIZE;
 		}
 	}
@@ -1217,16 +1218,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct mmu_gather *tlb;
+	struct mmu_gather tlb;
 	unsigned long end = address + size;
 	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
-	tlb = tlb_gather_mmu(mm, 0);
+	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	if (tlb)
-		tlb_finish_mmu(tlb, address, end);
+	tlb_finish_mmu(&tlb, address, end);
 	return end;
 }
 
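For reference, below is a minimal user-space sketch of the control flow this patch introduces in zap_pte_range(): pages are queued into a fixed-size batch, __tlb_remove_page() reports when the batch fills, and the caller breaks out of its inner loop, flushes, and retries via the again: label. The gather/flush helpers, BATCH_SIZE, and the integer "pages" here are illustrative stand-ins, not the kernel's mmu_gather API.

/*
 * Model of the force_flush retry pattern; names and sizes are made up
 * for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

#define BATCH_SIZE 8			/* stand-in for the mmu_gather batch */

struct gather {
	int pages[BATCH_SIZE];
	int nr;
};

/* Queue one page; return false when the batch has just become full. */
static bool gather_remove_page(struct gather *g, int page)
{
	g->pages[g->nr++] = page;
	return g->nr < BATCH_SIZE;
}

/* Stand-in for tlb_flush_mmu(): invalidate and free everything queued. */
static void gather_flush(struct gather *g)
{
	printf("flush %d pages\n", g->nr);
	g->nr = 0;
}

/* Model of zap_pte_range(): unmap [addr, end), flushing whenever full. */
static void zap_range(struct gather *g, int addr, int end)
{
	bool force_flush;

again:
	force_flush = false;
	while (addr < end) {
		force_flush = !gather_remove_page(g, addr++);
		if (force_flush)
			break;		/* batch full: flush outside the "lock" */
	}
	if (force_flush) {
		gather_flush(g);
		if (addr != end)
			goto again;
	}
}

int main(void)
{
	struct gather g = { .nr = 0 };

	zap_range(&g, 0, 20);		/* forces two intermediate flushes */
	gather_flush(&g);		/* final flush, as tlb_finish_mmu() would */
	return 0;
}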