@@ -526,10 +526,10 @@ extern void add_hash_page(unsigned context, unsigned long va,
* Atomic PTE updates.
*
* pte_update clears and sets bit atomically, and returns
- * the old pte value.
- * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
- * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ * the old pte value. In the 64-bit PTE case we lock around the
+ * low PTE word, since we expect ALL the flag bits to be there.
*/
+#ifndef CONFIG_PTE_64BIT
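+/*
+ * 32-bit PTE case: the whole PTE fits in one word, so the
+ * lwarx/stwcx. reservation can be taken on the PTE itself.
+ */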
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
@@ -543,10 +543,31 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
" stwcx. %1,0,%3\n\
|
|
|
bne- 1b"
|
|
|
: "=&r" (old), "=&r" (tmp), "=m" (*p)
|
|
|
- : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
|
|
|
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
|
|
|
: "cc" );
|
|
|
return old;
|
|
|
}
+#else
+static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
+					    unsigned long set)
+{
+	unsigned long long old;
+	unsigned long tmp;
+
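+	/*
+	 * 'old' is 64-bit, so %0 names its high register and %L0 its
+	 * low register.  The reservation is taken on the low PTE word
+	 * at p + 4 (operand %4), where the flag bits live; the high
+	 * word at p (operand %3) is fetched with a plain lwzx.
+	 * PPC405_ERR77 emits the erratum-77 workaround needed on the
+	 * 405 core ahead of the stwcx.
+	 */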
+	__asm__ __volatile__("\
+1:	lwarx	%L0,0,%4\n\
+	lwzx	%0,0,%3\n\
+	andc	%1,%L0,%5\n\
+	or	%1,%1,%6\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%1,0,%4\n\
+	bne-	1b"
+	: "=&r" (old), "=&r" (tmp), "=m" (*p)
+	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+	: "cc" );
+	return old;
+}
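+/*
+ * Only the low PTE word is updated atomically here; that is safe
+ * because all the flag bits live in the low word, while the high
+ * word (the physical page number) is expected to change only with
+ * the page table locked.
+ */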
+#endif
/*
* set_pte stores a linux PTE into the linux page table.