@@ -35,6 +35,11 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
 
+static inline void clean_pte_table(pte_t *pte)
+{
+	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
+}
+
 /*
  * Allocate one PTE table.
  *
@@ -42,14 +47,14 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  * into one table thus:
  *
  *  +------------+
- *  | h/w pt 0   |
- *  +------------+
- *  | h/w pt 1   |
- *  +------------+
  *  | Linux pt 0 |
  *  +------------+
  *  | Linux pt 1 |
  *  +------------+
+ *  | h/w pt 0   |
+ *  +------------+
+ *  | h/w pt 1   |
+ *  +------------+
  */
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -57,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte;
 
 	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
-	if (pte) {
-		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
-		pte += PTRS_PER_PTE;
-	}
+	if (pte)
+		clean_pte_table(pte);
 
 	return pte;
 }
@@ -76,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
 	if (pte) {
-		if (!PageHighMem(pte)) {
-			void *page = page_address(pte);
-			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
-		}
+		if (!PageHighMem(pte))
+			clean_pte_table(page_address(pte));
 		pgtable_page_ctor(pte);
 	}
 
@@ -91,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  */
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	if (pte) {
-		pte -= PTRS_PER_PTE;
+	if (pte)
 		free_page((unsigned long)pte);
-	}
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -106,7 +105,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 				  unsigned long prot)
 {
-	unsigned long pmdval = pte | prot;
+	unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
 	flush_pmd_entry(pmdp);
@@ -121,14 +120,10 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long pte_ptr = (unsigned long)ptep;
-
 	/*
-	 * The pmd must be loaded with the physical
-	 * address of the PTE table
+	 * The pmd must be loaded with the physical address of the PTE table
 	 */
-	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
-	__pmd_populate(pmdp, __pa(pte_ptr), _PAGE_KERNEL_TABLE);
+	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
 }
 
 static inline void