|
@@ -6,6 +6,8 @@
|
|
|
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
|
|
|
unsigned long addr, unsigned *shift);
|
|
|
|
|
|
+void flush_dcache_icache_hugepage(struct page *page);
|
|
|
+
|
|
|
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
|
|
|
unsigned long len);
|
|
|
|
|
@@ -13,12 +15,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
|
|
unsigned long end, unsigned long floor,
|
|
|
unsigned long ceiling);
|
|
|
|
|
|
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
|
- pte_t *ptep, pte_t pte);
|
|
|
-
|
|
|
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
|
|
- pte_t *ptep);
|
|
|
-
|
|
|
/*
|
|
|
* The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
|
|
|
* to override the version in mm/hugetlb.c
|
|
@@ -44,9 +40,26 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
|
|
|
{
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
|
+ pte_t *ptep, pte_t pte)
|
|
|
+{
|
|
|
+ set_pte_at(mm, addr, ptep, pte);
|
|
|
+}
|
|
|
+
|
|
|
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
|
|
+ unsigned long addr, pte_t *ptep)
|
|
|
+{
|
|
|
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
|
|
|
+ return __pte(old);
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Clear a huge-page PTE and flush the corresponding TLB entry.
 *
 * Fix: the original stored huge_ptep_get_and_clear()'s result in a local
 * pte_t that was never read — a dead store that trips
 * -Wunused-but-set-variable. The returned value is intentionally unused
 * here; only the clear-and-flush side effects matter.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	/* NOTE(review): flushes at the faulting address; assumes the arch
	 * TLB flush handles huge mappings at this granularity — confirm. */
	flush_tlb_page(vma, addr);
}
|
|
|
|
|
|
static inline int huge_pte_none(pte_t pte)
|