@@ -42,20 +42,23 @@ static inline int pte_exec_kernel(pte_t pte)
return pte_x(pte);
}

-#ifndef CONFIG_PARAVIRT
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
* not possible, use pte_get_and_clear to obtain the old pte
* value and then use set_pte to update it. -ben
*/
-static inline void set_pte(pte_t *ptep, pte_t pte)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
ptep->pte_high = pte.pte_high;
smp_wmb();
ptep->pte_low = pte.pte_low;
}
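
To make the rule in the comment above concrete, here is a minimal user-space model of the protocol; it is a sketch, not kernel code, and every name in it (model_pte, the bit layout, the barrier stand-in) is an assumption. The present bit sits in bit 0 of the low word, as on x86, so writing pte_high first means a concurrent hardware walk never sees a present entry assembled from mismatched halves, and a live entry is taken down with get-and-clear before being rewritten:

#include <stdint.h>
#include <stdio.h>

/* Two 32-bit halves standing in for a PAE pte; P is bit 0 of the low word. */
struct model_pte { uint32_t pte_low, pte_high; };

static void model_set_pte(struct model_pte *ptep, struct model_pte pte)
{
	ptep->pte_high = pte.pte_high;
	__asm__ __volatile__("" ::: "memory");	/* stands in for smp_wmb() */
	ptep->pte_low = pte.pte_low;		/* P becomes set only here */
}

static struct model_pte model_ptep_get_and_clear(struct model_pte *ptep)
{
	struct model_pte old = *ptep;
	ptep->pte_low = 0;			/* drop P first ... */
	__asm__ __volatile__("" ::: "memory");
	ptep->pte_high = 0;			/* ... then the high half */
	return old;
}

int main(void)
{
	struct model_pte pte = { 0, 0 };

	/* Legal: the entry is not present, so it may be written directly. */
	model_set_pte(&pte, (struct model_pte){ 0x1025, 0x1 });

	/* Updating a live entry: clear it first, then install the new one. */
	struct model_pte old = model_ptep_get_and_clear(&pte);
	model_set_pte(&pte, (struct model_pte){ 0x2025, 0x2 });
	printf("replaced low=%#x high=%#x\n", old.pte_low, old.pte_high);
	return 0;
}
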
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ native_set_pte(ptep, pte);
+}

/*
* Since this is only called on user PTEs, and the page fault handler
@@ -63,7 +66,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* we are justified in merely clearing the PTE present bit, followed
* by a set. The ordering here is important.
*/
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
ptep->pte_low = 0;
smp_wmb();
@@ -72,32 +76,48 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
ptep->pte_low = pte.pte_low;
}
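
Why that ordering suffices is easiest to see by walking the states a concurrent page-table walk could observe between the three stores. A small self-contained sketch, with hypothetical names and P modeled as bit 0 of the low word:

#include <stdio.h>
#include <stdint.h>

struct model_pte { uint32_t pte_low, pte_high; };

static void show(const char *when, struct model_pte p)
{
	printf("%-6s low=%#010x high=%#010x P=%u\n",
	       when, p.pte_low, p.pte_high, p.pte_low & 1);
}

int main(void)
{
	struct model_pte e   = { 0x1025, 0x1 };	/* old user pte, present */
	struct model_pte new = { 0x2025, 0x2 };	/* replacement, present */

	show("start", e);
	e.pte_low = 0;
	show("step1", e);	/* not present: walker ignores the entry */
	e.pte_high = new.pte_high;
	show("step2", e);	/* still not present */
	e.pte_low = new.pte_low;
	show("step3", e);	/* new entry becomes visible whole */
	return 0;
}
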
-#define set_pte_atomic(pteptr,pteval) \
- set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
-#define set_pmd(pmdptr,pmdval) \
- set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
-#define set_pud(pudptr,pudval) \
- (*(pudptr) = (pudval))
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
+}
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
+}
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
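
set_pte_atomic() and set_pmd() cannot use the two-halves ordering trick: a PMD may be walked at any time, and set_pte_atomic()'s callers need the whole 64-bit entry to change in one shot, which is what set_64bit() provides (historically a cmpxchg8b loop on i386). A user-space analogue, assuming a GCC-style toolchain (on 32-bit targets this may pull in libatomic):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for set_64bit(): one indivisible 64-bit store,
 * so there is no window where only half of the entry is updated. */
static void model_set_64bit(uint64_t *entryp, uint64_t val)
{
	__atomic_store_n(entryp, val, __ATOMIC_RELAXED);
}

int main(void)
{
	uint64_t pmd_entry = 0;

	model_set_64bit(&pmd_entry, 0x2000000025ULL);
	printf("pmd=%#llx\n", (unsigned long long)pmd_entry);
	return 0;
}
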
/*
* For PTEs and PDEs, we must clear the P-bit first when clearing a page table
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
*/
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
ptep->pte_low = 0;
smp_wmb();
ptep->pte_high = 0;
}

-static inline void pmd_clear(pmd_t *pmd)
+static inline void native_pmd_clear(pmd_t *pmd)
{
u32 *tmp = (u32 *)pmd;
*tmp = 0;
smp_wmb();
*(tmp + 1) = 0;
}
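
The comment above is about a concrete failure mode, which the model below makes explicit: clearing the high half first would leave a window where the entry is still present (P=1 in the low word) but its upper address bits are already zero, so a concurrent walk could follow a half-garbage address. A hypothetical model, not kernel code:

#include <assert.h>
#include <stdint.h>

struct model_pmd { uint32_t low, high; };

int main(void)
{
	struct model_pmd e = { 0x1067, 0x3 };	/* present entry above 4GB */

	/* Wrong order: a walk between the stores sees P=1 with high==0. */
	struct model_pmd torn = { e.low, 0 };
	assert((torn.low & 1) && torn.high == 0);

	/* native_pmd_clear()'s order: P drops first, so the entry is
	 * ignored before either half carries stale address bits. */
	struct model_pmd safe = { 0, e.high };
	assert(!(safe.low & 1));
	return 0;
}
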
+
+#ifndef CONFIG_PARAVIRT
+#define set_pte(ptep, pte) native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
+#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
+#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+#define set_pud(pudp, pud) native_set_pud(pudp, pud)
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd) native_pmd_clear(pmd)
#endif
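
This #ifndef block is the non-paravirt wiring: the generic names collapse directly to the native_* implementations. With CONFIG_PARAVIRT enabled the same names are instead expected to resolve through the paravirt ops table, so a hypervisor port can intercept every PTE write, with the native_* functions serving as the bare-metal defaults. A self-contained sketch of that indirection pattern (the real paravirt_ops layout differs; every name here is made up):

#include <stdio.h>

typedef struct { unsigned long long val; } xpte_t;

static void native_set_xpte(xpte_t *ptep, xpte_t pte) { *ptep = pte; }
static void hv_set_xpte(xpte_t *ptep, xpte_t pte)
{
	printf("hypercall: pte update trapped\n");
	*ptep = pte;
}

/* bare metal boots with the native function as the default */
static struct { void (*set_pte)(xpte_t *, xpte_t); } pv_ops = {
	.set_pte = native_set_xpte,
};
#define set_xpte(ptep, pte) pv_ops.set_pte((ptep), (pte))

int main(void)
{
	xpte_t e = { 0 };

	set_xpte(&e, (xpte_t){ 0x1025 });	/* native path */
	pv_ops.set_pte = hv_set_xpte;		/* a hypervisor port rebinds it */
	set_xpte(&e, (xpte_t){ 0x2025 });	/* now intercepted */
	return 0;
}
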
/*
@@ -119,7 +139,7 @@ static inline void pud_clear (pud_t * pud) { }
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
pmd_index(address))

-static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
+static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;

@@ -146,28 +166,21 @@ static inline int pte_none(pte_t pte)

static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte.pte_low >> PAGE_SHIFT) |
- (pte.pte_high << (32 - PAGE_SHIFT));
+ return pte_val(pte) >> PAGE_SHIFT;
}

extern unsigned long long __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
- pte_t pte;
-
- pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
- (pgprot_val(pgprot) >> 32);
- pte.pte_high &= (__supported_pte_mask >> 32);
- pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
- __supported_pte_mask;
- return pte;
+ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
+ pgprot_val(pgprot)) & __supported_pte_mask);
}
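
The rewritten pte_pfn()/pfn_pte() lean on pte_val()/__pte() carrying the full 64-bit value, so the pfn conversion becomes a single shift instead of reassembling the two words by hand. A quick round-trip check of the arithmetic with local stand-ins (xpfn_pte/xpte_pfn are assumptions, PAGE_SHIFT taken as 12):

#include <assert.h>
#include <stdint.h>

#define XPAGE_SHIFT 12

static uint64_t xpfn_pte(unsigned long pfn, uint64_t prot, uint64_t mask)
{
	return (((uint64_t)pfn << XPAGE_SHIFT) | prot) & mask;
}

static unsigned long xpte_pfn(uint64_t pte)
{
	return (unsigned long)(pte >> XPAGE_SHIFT);
}

int main(void)
{
	uint64_t mask = ~0ULL;		/* stand-in for __supported_pte_mask */
	unsigned long pfn = 0x123456;	/* large enough to need pte_high */
	uint64_t pte = xpfn_pte(pfn, 0x67, mask);

	/* low word 0x23456067, high word 0x1: the pfn straddles both
	 * halves, which the old open-coded version handled by shifting
	 * each word separately. */
	assert((uint32_t)pte == 0x23456067u);
	assert((uint32_t)(pte >> 32) == 0x1u);
	assert(xpte_pfn(pte) == pfn);
	return 0;
}
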
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
- return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
- pgprot_val(pgprot)) & __supported_pte_mask);
+ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
+ pgprot_val(pgprot)) & __supported_pte_mask);
}

/*