@@ -5256,6 +5256,41 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 	return ret;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * We don't consider swapping or file mapped pages because THP does not
+ * support them for now.
+ * Caller should make sure that pmd_trans_huge(pmd) is true.
+ */
+static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t pmd, union mc_target *target)
+{
+	struct page *page = NULL;
+	struct page_cgroup *pc;
+	enum mc_target_type ret = MC_TARGET_NONE;
+
+	page = pmd_page(pmd);
+	VM_BUG_ON(!page || !PageHead(page));
+	if (!move_anon())
+		return ret;
+	pc = lookup_page_cgroup(page);
+	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+		ret = MC_TARGET_PAGE;
+		if (target) {
+			get_page(page);
+			target->page = page;
+		}
+	}
+	return ret;
+}
+#else
+static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t pmd, union mc_target *target)
+{
+	return MC_TARGET_NONE;
+}
+#endif
+
 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 					unsigned long addr, unsigned long end,
 					struct mm_walk *walk)
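
Both call sites below lean on pmd_trans_huge_lock() from the earlier "thp: optimize away unnecessary page table locking" patch in this series. A hedged paraphrase of its contract, for orientation only (the real body is __pmd_trans_huge_lock() in mm/huge_memory.c):

	/*
	 * Sketch, not part of this patch: what the callers below assume.
	 *
	 * pmd_trans_huge_lock(pmd, vma) returns:
	 *   1  - *pmd maps a stable thp; mm->page_table_lock is held and
	 *        the caller must drop it with spin_unlock(),
	 *   0  - *pmd does not map a huge page; the lock is not held,
	 *  -1  - the thp was mid-split; the helper waited for the split
	 *        to finish and the lock is not held.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		/* whole-thp work goes here, under page_table_lock */
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}
	/* 0 or -1: fall through to the per-pte walk */
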
@@ -5264,9 +5299,12 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_page_pmd(walk->mm, pmd);
-	if (pmd_trans_unstable(pmd))
+	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
+			mc.precharge += HPAGE_PMD_NR;
+		spin_unlock(&vma->vm_mm->page_table_lock);
 		return 0;
+	}
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
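
The precharge is bumped by HPAGE_PMD_NR rather than 1 because mc.precharge is kept in base-page units. The arithmetic, assuming x86-64 with 4 KB base pages (the constants come from huge_mm.h; other configurations differ):

	/*
	 * HPAGE_PMD_SHIFT = PMD_SHIFT                    = 21
	 * HPAGE_PMD_ORDER = HPAGE_PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9
	 * HPAGE_PMD_NR    = 1 << HPAGE_PMD_ORDER         = 512
	 *
	 * One huge pmd therefore reserves 512 units of mc.precharge,
	 * the same amount the move step below consumes on success.
	 */
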
@@ -5425,18 +5463,49 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	struct vm_area_struct *vma = walk->private;
 	pte_t *pte;
 	spinlock_t *ptl;
+	enum mc_target_type target_type;
+	union mc_target target;
+	struct page *page;
+	struct page_cgroup *pc;
 
-	split_huge_page_pmd(walk->mm, pmd);
-	if (pmd_trans_unstable(pmd))
+	/*
+	 * We don't take compound_lock() here but no race with splitting thp
+	 * happens because:
+	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
+	 *    under splitting, which means there's no concurrent thp split,
+	 *  - if another thread runs into split_huge_page() just after we
+	 *    entered this if-block, the thread must wait for page table lock
+	 *    to be unlocked in __split_huge_page_splitting(), where the main
+	 *    part of thp split is not executed yet.
+	 */
+	if (pmd_trans_huge_lock(pmd, vma) == 1) {
+		if (!mc.precharge) {
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			return 0;
+		}
+		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
+		if (target_type == MC_TARGET_PAGE) {
+			page = target.page;
+			if (!isolate_lru_page(page)) {
+				pc = lookup_page_cgroup(page);
+				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
+							pc, mc.from, mc.to,
+							false)) {
+					mc.precharge -= HPAGE_PMD_NR;
+					mc.moved_charge += HPAGE_PMD_NR;
+				}
+				putback_lru_page(page);
+			}
+			put_page(page);
+		}
+		spin_unlock(&vma->vm_mm->page_table_lock);
 		return 0;
+	}
+
 retry:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
-		union mc_target target;
-		int type;
-		struct page *page;
-		struct page_cgroup *pc;
 		swp_entry_t ent;
 
 		if (!mc.precharge)