@@ -12,17 +12,20 @@
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/hugetlb.h>
+#include <linux/writeback.h>
+#include <linux/file.h>
 #include <linux/syscalls.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+static unsigned long msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
 	spinlock_t *ptl;
 	int progress = 0;
+	unsigned long ret = 0;
 
 again:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -43,58 +46,64 @@ again:
 		if (!page)
 			continue;
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
-				page_test_and_clear_dirty(page))
-			set_page_dirty(page);
+		    page_test_and_clear_dirty(page))
+			ret += set_page_dirty(page);
 		progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	if (addr != end)
 		goto again;
+	return ret;
 }
 
-static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-				unsigned long addr, unsigned long end)
+static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
+			pud_t *pud, unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		msync_pte_range(vma, pmd, addr, next);
+		ret += msync_pte_range(vma, pmd, addr, next);
 	} while (pmd++, addr = next, addr != end);
+	return ret;
 }
 
-static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-				unsigned long addr, unsigned long end)
+static inline unsigned long msync_pud_range(struct vm_area_struct *vma,
+			pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		msync_pmd_range(vma, pud, addr, next);
+		ret += msync_pmd_range(vma, pud, addr, next);
 	} while (pud++, addr = next, addr != end);
+	return ret;
 }
 
-static void msync_page_range(struct vm_area_struct *vma,
+static unsigned long msync_page_range(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	unsigned long next;
+	unsigned long ret = 0;
 
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
 	 * to do anything more on an msync().
 	 */
 	if (vma->vm_flags & VM_HUGETLB)
-		return;
+		return 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(vma->vm_mm, addr);
@@ -103,8 +112,9 @@ static void msync_page_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		msync_pud_range(vma, pgd, addr, next);
+		ret += msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
+	return ret;
 }
 
 /*
@@ -118,8 +128,9 @@ static void msync_page_range(struct vm_area_struct *vma,
  * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-static int msync_interval(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end, int flags)
+static int msync_interval(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long end, int flags,
+			unsigned long *nr_pages_dirtied)
 {
 	int ret = 0;
 	struct file *file = vma->vm_file;
@@ -128,7 +139,7 @@ static int msync_interval(struct vm_area_struct *vma,
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		msync_page_range(vma, addr, end);
+		*nr_pages_dirtied = msync_page_range(vma, addr, end);
 
 		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;
@@ -157,11 +168,8 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 	unsigned long end;
 	struct vm_area_struct *vma;
 	int unmapped_error, error = -EINVAL;
+	int done = 0;
 
-	if (flags & MS_SYNC)
-		current->flags |= PF_SYNCWRITE;
-
-	down_read(&current->mm->mmap_sem);
 	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
 		goto out;
 	if (start & ~PAGE_MASK)
@@ -180,13 +188,19 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 	 * If the interval [start,end) covers some unmapped address ranges,
 	 * just ignore them, but return -ENOMEM at the end.
 	 */
+	down_read(&current->mm->mmap_sem);
+	if (flags & MS_SYNC)
+		current->flags |= PF_SYNCWRITE;
 	vma = find_vma(current->mm, start);
 	unmapped_error = 0;
-	for (;;) {
+	do {
+		unsigned long nr_pages_dirtied = 0;
+		struct file *file;
+
 		/* Still start < end. */
 		error = -ENOMEM;
 		if (!vma)
-			goto out;
+			goto out_unlock;
 		/* Here start < vma->vm_end. */
 		if (start < vma->vm_start) {
 			unmapped_error = -ENOMEM;
@@ -195,22 +209,37 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
 		/* Here vma->vm_start <= start < vma->vm_end. */
 		if (end <= vma->vm_end) {
 			if (start < end) {
-				error = msync_interval(vma, start, end, flags);
+				error = msync_interval(vma, start, end, flags,
+							&nr_pages_dirtied);
 				if (error)
-					goto out;
+					goto out_unlock;
 			}
 			error = unmapped_error;
-			goto out;
+			done = 1;
+		} else {
+			/* Here vma->vm_start <= start < vma->vm_end < end. */
+			error = msync_interval(vma, start, vma->vm_end, flags,
+						&nr_pages_dirtied);
+			if (error)
+				goto out_unlock;
 		}
-		/* Here vma->vm_start <= start < vma->vm_end < end. */
-		error = msync_interval(vma, start, vma->vm_end, flags);
-		if (error)
-			goto out;
+		file = vma->vm_file;
 		start = vma->vm_end;
-		vma = vma->vm_next;
-	}
-out:
-	up_read(&current->mm->mmap_sem);
+		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
+			get_file(file);
+			up_read(&current->mm->mmap_sem);
+			balance_dirty_pages_ratelimited_nr(file->f_mapping,
+							nr_pages_dirtied);
+			fput(file);
+			down_read(&current->mm->mmap_sem);
+			vma = find_vma(current->mm, start);
+		} else {
+			vma = vma->vm_next;
+		}
+	} while (!done);
+out_unlock:
 	current->flags &= ~PF_SYNCWRITE;
+	up_read(&current->mm->mmap_sem);
+out:
 	return error;
 }
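
The comment above msync_interval() describes the usage model this patch preserves: MS_ASYNC only marks pages dirty, and the application chooses when to start and wait for writeout. A minimal userspace sketch of that pattern follows (not part of the patch; the file path and length are illustrative assumptions):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		const size_t len = 4096;
		int fd = open("/tmp/msync-demo", O_RDWR | O_CREAT, 0644);

		if (fd < 0 || ftruncate(fd, len) < 0) {
			perror("open/ftruncate");
			return EXIT_FAILURE;
		}

		char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);
		if (map == MAP_FAILED) {
			perror("mmap");
			return EXIT_FAILURE;
		}

		memset(map, 'x', len);	/* dirty the shared mapping */

		/* Mark the pages dirty; no I/O is started here. */
		if (msync(map, len, MS_ASYNC) < 0)
			perror("msync");

		/* The application decides when to write out and wait. */
		if (fsync(fd) < 0)
			perror("fsync");

		munmap(map, len);
		close(fd);
		return EXIT_SUCCESS;
	}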
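The nr_pages_dirtied count threaded up through the page-table walk exists so the MS_ASYNC path can be throttled like a write(2) of the same size. A sketch of the include/linux/writeback.h arrangement this patch assumes, where the existing single-page entry point becomes the nr == 1 case of a new multipage one (see the companion writeback patch for the real definitions):

	struct address_space;	/* opaque here; defined in linux/fs.h */

	/* Multipage entry point assumed by this patch. */
	void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
						unsigned long nr_pages_dirtied);

	/* Existing single-page API, expressed as the nr == 1 case. */
	static inline void
	balance_dirty_pages_ratelimited(struct address_space *mapping)
	{
		balance_dirty_pages_ratelimited_nr(mapping, 1);
	}

Note that the sys_msync() loop drops mmap_sem before throttling, since balance_dirty_pages_ratelimited_nr() may block: it pins the file with get_file() across the unlocked region, and after retaking the semaphore it must re-run find_vma() because the mapping may have changed in the meantime.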