@@ -695,10 +695,15 @@ enum clear_refs_types {
 	CLEAR_REFS_LAST,
 };
 
+struct clear_refs_private {
+	struct vm_area_struct *vma;
+};
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = cp->vma;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
@@ -753,13 +758,16 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		struct clear_refs_private cp = {
+		};
 		struct mm_walk clear_refs_walk = {
 			.pmd_entry = clear_refs_pte_range,
 			.mm = mm,
+			.private = &cp,
 		};
 		down_read(&mm->mmap_sem);
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			clear_refs_walk.private = vma;
+			cp.vma = vma;
 			if (is_vm_hugetlb_page(vma))
 				continue;
 			/*
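
The change above is one instance of a general pattern: instead of smuggling a bare
pointer (the vma) through the walker's opaque .private slot, the per-walk state is
wrapped in a dedicated struct, so later patches can add fields without touching the
clear_refs_pte_range() signature. Below is a minimal, self-contained userspace sketch
of that pattern, not kernel code; range_walk, walk_ranges and walk_private are
illustrative names invented for the example, not kernel API.

/* Build with: cc -Wall demo.c -o demo */
#include <stdio.h>

/* Generic walker: calls ->entry for each sub-range, passing ->private through. */
struct range_walk {
	int (*entry)(unsigned long start, unsigned long end, void *private);
	void *private;		/* opaque per-walk state, like mm_walk.private */
};

/* Stand-in for struct clear_refs_private; the tag plays the role of the vma.
 * New per-walk fields can be added here without changing the callback type. */
struct walk_private {
	const char *tag;
};

static int print_entry(unsigned long start, unsigned long end, void *private)
{
	struct walk_private *cp = private;	/* unwrap, as clear_refs_pte_range does */

	printf("[%s] range %#lx-%#lx\n", cp->tag, start, end);
	return 0;
}

static void walk_ranges(struct range_walk *walk, unsigned long start,
			unsigned long end, unsigned long step)
{
	unsigned long a;

	for (a = start; a < end; a += step)
		walk->entry(a, a + step, walk->private);
}

int main(void)
{
	struct walk_private cp = { .tag = "demo" };
	struct range_walk walk = {
		.entry = print_entry,
		.private = &cp,		/* same wiring as clear_refs_walk.private = &cp */
	};

	walk_ranges(&walk, 0x1000, 0x4000, 0x1000);
	return 0;
}

The caller can update fields of cp between or during walks (as the loop above does
with cp.vma) while the callback keeps a single, stable signature.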