@@ -1378,13 +1378,45 @@ static inline void pgtable_page_dtor(struct page *page)
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
 
+#if USE_SPLIT_PMD_PTLOCKS
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+	return &virt_to_page(pmd)->ptl;
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+	spin_lock_init(&page->ptl);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	page->pmd_huge_pte = NULL;
+#endif
+	return true;
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(page->pmd_huge_pte);
+#endif
+}
+
+#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
+
+#else
+
 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
 
+static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
+static inline void pgtable_pmd_page_dtor(struct page *page) {}
+
 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
 
+#endif
+
 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
 {
 	spinlock_t *ptl = pmd_lockptr(mm, pmd);