Browse Source

hugetlbfs: add missing TLB flush to hugetlb_cow()

A cow break on a hugetlbfs page with page_count > 1 will set a new pte with
set_huge_pte_at(), w/o any tlb flush operation.  The old pte will remain in
the tlb and subsequent write access to the page will result in a page fault
loop, for as long as it may take until the tlb is flushed from somewhere else.
 This patch introduces an architecture-specific huge_ptep_clear_flush()
function, which is called before the set_huge_pte_at() in hugetlb_cow().

ATTENTION: This is just a nop on all architectures for now, the s390
implementation will come with our large page patch later.  Other architectures
should define their own huge_ptep_clear_flush() if needed.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Gerald Schaefer 17 years ago
parent
commit
8fe627ec5b

+ 5 - 0
include/asm-ia64/hugetlb.h

@@ -34,4 +34,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_IA64_HUGETLB_H */

+ 5 - 0
include/asm-powerpc/hugetlb.h

@@ -34,4 +34,9 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_POWERPC_HUGETLB_H */

+ 5 - 0
include/asm-sh/hugetlb.h

@@ -46,4 +46,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_SH_HUGETLB_H */

+ 5 - 0
include/asm-sparc64/hugetlb.h

@@ -39,4 +39,9 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */

+ 5 - 0
include/asm-x86/hugetlb.h

@@ -46,4 +46,9 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return ptep_get_and_clear(mm, addr, ptep);
 }
 
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */

+ 1 - 0
mm/hugetlb.c

@@ -892,6 +892,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
 	if (likely(pte_same(*ptep, pte))) {
 		/* Break COW */
+		huge_ptep_clear_flush(vma, address, ptep);
 		set_huge_pte_at(mm, address, ptep,
 				make_huge_pte(vma, new_page, 1));
 		/* Make the old page be freed below */