@@ -159,36 +159,46 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 }
 
 /*
- * Lock and read a linux PTE. If it's present and writable, atomically
- * set dirty and referenced bits and return the PTE, otherwise return 0.
+ * If it's present and writable, atomically set dirty and referenced bits and
+ * return the PTE, otherwise return 0. If we find a transparent hugepage
+ * and if it is marked splitting we return 0;
  */
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
+						 unsigned int hugepage)
 {
-	pte_t pte, tmp;
-
-	/* wait until _PAGE_BUSY is clear then set it atomically */
-	__asm__ __volatile__ (
-		"1:	ldarx	%0,0,%3\n"
-		"	andi.	%1,%0,%4\n"
-		"	bne-	1b\n"
-		"	ori	%1,%0,%4\n"
-		"	stdcx.	%1,0,%3\n"
-		"	bne-	1b"
-		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
-		: "r" (p), "i" (_PAGE_BUSY)
-		: "cc");
-
-	if (pte_present(pte)) {
-		pte = pte_mkyoung(pte);
-		if (writing && pte_write(pte))
-			pte = pte_mkdirty(pte);
-	}
+	pte_t old_pte, new_pte = __pte(0);
+
+	while (1) {
+		old_pte = pte_val(*ptep);
+		/*
+		 * wait until _PAGE_BUSY is clear then set it atomically
+		 */
+		if (unlikely(old_pte & _PAGE_BUSY)) {
+			cpu_relax();
+			continue;
+		}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		/* If hugepage and is trans splitting return None */
+		if (unlikely(hugepage &&
+			     pmd_trans_splitting(pte_pmd(old_pte))))
+			return __pte(0);
+#endif
+		/* If pte is not present return None */
+		if (unlikely(!(old_pte & _PAGE_PRESENT)))
+			return __pte(0);
 
-	*p = pte;	/* clears _PAGE_BUSY */
+		new_pte = pte_mkyoung(old_pte);
+		if (writing && pte_write(old_pte))
+			new_pte = pte_mkdirty(new_pte);
 
-	return pte;
+		if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
+					     new_pte))
+			break;
+	}
+	return new_pte;
 }
 
+
 /* Return HPTE cache control bits corresponding to Linux pte bits */
 static inline unsigned long hpte_cache_bits(unsigned long pte_val)
 {
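
For readers less familiar with the ldarx/stdcx. idiom being removed: the rewritten
function is a plain C retry loop that spins while _PAGE_BUSY is set, builds the
updated PTE value, and publishes it with a single 64-bit compare-and-swap, starting
over if the PTE changed underneath it. Below is a minimal user-space sketch of that
pattern only, not the kernel code: the PAGE_* bit values and read_update_pte() are
made up for illustration, and GCC's __atomic builtins stand in for the kernel's
__cmpxchg_u64().

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout; the real _PAGE_* values live in the powerpc headers. */
#define PAGE_BUSY	(1UL << 0)
#define PAGE_PRESENT	(1UL << 1)
#define PAGE_ACCESSED	(1UL << 2)
#define PAGE_DIRTY	(1UL << 3)
#define PAGE_RW		(1UL << 4)

/*
 * Retry loop in the style of the new kvmppc_read_update_linux_pte():
 * re-read while the busy bit is set, compute the updated value, and
 * publish it with one compare-and-swap; start over if it raced.
 */
static uint64_t read_update_pte(uint64_t *ptep, int writing)
{
	uint64_t old, new;

	for (;;) {
		old = __atomic_load_n(ptep, __ATOMIC_RELAXED);
		if (old & PAGE_BUSY)
			continue;		/* another updater holds it busy */
		if (!(old & PAGE_PRESENT))
			return 0;		/* nothing to update */

		new = old | PAGE_ACCESSED;	/* pte_mkyoung() analogue */
		if (writing && (old & PAGE_RW))
			new |= PAGE_DIRTY;	/* pte_mkdirty() analogue */

		/* __cmpxchg_u64() analogue: only succeeds if *ptep is unchanged */
		if (__atomic_compare_exchange_n(ptep, &old, new, 0,
						__ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
			return new;
	}
}

int main(void)
{
	uint64_t pte = PAGE_PRESENT | PAGE_RW;

	printf("updated pte: %#llx\n",
	       (unsigned long long)read_update_pte(&pte, 1));
	return 0;
}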