@@ -37,15 +37,12 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
+		int dirty_accountable, int prot_numa)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_cpupid = true;
-	int last_cpu = -1;
-	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	arch_enter_lazy_mmu_mode();
@@ -64,19 +61,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page) {
-					int cpupid = page_cpupid_last(page);
-					int this_cpu = cpupid_to_cpu(cpupid);
-					int this_pid = cpupid_to_pid(cpupid);
-
-					if (last_cpu == -1)
-						last_cpu = this_cpu;
-					if (last_pid == -1)
-						last_pid = this_pid;
-					if (last_cpu != this_cpu ||
-					    last_pid != this_pid) {
-						all_same_cpupid = false;
-					}
-
 					if (!pte_numa(oldpte)) {
 						ptent = pte_mknuma(ptent);
 						updated = true;
@@ -115,26 +99,9 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	spin_lock(&mm->page_table_lock);
-	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
-	spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
-{
-	BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pud_t *pud, unsigned long addr, unsigned long end,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -142,7 +109,6 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,17 +134,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_cpupid);
+				 dirty_accountable, prot_numa);
 		pages += this_pages;
-
-		/*
-		 * If we are changing protections for NUMA hinting faults then
-		 * set pmd_numa if the examined pages were all on the same
-		 * node. This allows a regular PMD to be handled as one fault
-		 * and effectively batches the taking of the PTL
-		 */
-		if (prot_numa && this_pages && all_same_cpupid)
-			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
 	return pages;