@@ -80,11 +80,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
-	unsigned long old_start;
 
-	old_start = old_addr;
-	mmu_notifier_invalidate_range_start(vma->vm_mm,
-					    old_start, old_end);
 	if (vma->vm_file) {
 		/*
 		 * Subtle point from Rajesh Venkatasubramanian: before
@@ -111,7 +107,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 				   new_pte++, new_addr += PAGE_SIZE) {
 		if (pte_none(*old_pte))
 			continue;
-		pte = ptep_clear_flush(vma, old_addr, old_pte);
+		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		set_pte_at(mm, new_addr, new_pte, pte);
 	}
@@ -123,7 +119,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
-	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
@@ -134,10 +129,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
 	unsigned long extent, next, old_end;
 	pmd_t *old_pmd, *new_pmd;
+	bool need_flush = false;
 
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
 
+	mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);
+
 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
 		cond_resched();
 		next = (old_addr + PMD_SIZE) & PMD_MASK;
@@ -158,7 +156,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			extent = LATENCY_LIMIT;
 		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
 				new_vma, new_pmd, new_addr);
+		need_flush = true;
 	}
+	if (likely(need_flush))
+		flush_tlb_range(vma, old_end-len, old_addr);
+
+	mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);
 
 	return len + old_addr - old_end;	/* how much done */
 }
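
For readers skimming the diff, here is a minimal, self-contained sketch of the pattern the patch adopts in move_page_tables(): clear the old entries without flushing per page, remember that something was cleared, and issue a single range flush (with the mmu-notifier invalidate window bracketing the whole move). This is illustrative user-space C only, not kernel code; every identifier in it (entry, clear_entry, flush_range, move_entries) is hypothetical.

#include <stdbool.h>
#include <stddef.h>

struct entry { bool present; };

/* Hypothetical stand-ins for ptep_get_and_clear() and flush_tlb_range(). */
static void clear_entry(struct entry *e)          { e->present = false; }
static void flush_range(size_t start, size_t end) { (void)start; (void)end; /* one batched flush */ }

static void move_entries(struct entry *old, struct entry *new_tbl, size_t n)
{
	bool need_flush = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!old[i].present)
			continue;
		clear_entry(&old[i]);		/* clear without a per-entry flush */
		new_tbl[i].present = true;	/* install at the new location */
		need_flush = true;
	}
	if (need_flush)
		flush_range(0, n);		/* single flush after the loop, as the patch does */
}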