@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
@@ -7,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
@@ -385,8 +387,25 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_page_pmd(walk->mm, pmd);
-
+	spin_lock(&walk->mm->page_table_lock);
+	if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_splitting(*pmd)) {
+			spin_unlock(&walk->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			smaps_pte_entry(*(pte_t *)pmd, addr,
+					HPAGE_PMD_SIZE, walk);
+			spin_unlock(&walk->mm->page_table_lock);
+			return 0;
+		}
+	} else {
+		spin_unlock(&walk->mm->page_table_lock);
+	}
+	/*
+	 * The mmap_sem held all the way back in m_start() is what
+	 * keeps khugepaged out of here and from collapsing things
+	 * in here.
+	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);