|
@@ -197,6 +197,63 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
|
|
|
}
|
|
|
#endif /* CONFIG_MMU */
|
|
|
|
|
|
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
|
|
|
+ unsigned long addr,
|
|
|
+ pte_t *ptep)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * Get the current pte state, but zero it out to make it
|
|
|
+ * non-present, preventing the hardware from asynchronously
|
|
|
+ * updating it.
|
|
|
+ */
|
|
|
+ return ptep_get_and_clear(mm, addr, ptep);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
|
|
|
+ unsigned long addr,
|
|
|
+ pte_t *ptep, pte_t pte)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * The pte is non-present, so there's no hardware state to
|
|
|
+ * preserve.
|
|
|
+ */
|
|
|
+ set_pte_at(mm, addr, ptep, pte);
|
|
|
+}
|
|
|
+
|
|
|
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
|
|
|
+/*
|
|
|
+ * Start a pte protection read-modify-write transaction, which
|
|
|
+ * protects against asynchronous hardware modifications to the pte.
|
|
|
+ * The intention is not to prevent the hardware from making pte
|
|
|
+ * updates, but to prevent any updates it may make from being lost.
|
|
|
+ *
|
|
|
+ * This does not protect against other software modifications of the
|
|
|
+ * pte; the appropriate pte lock must be held over the transaction.
|
|
|
+ *
|
|
|
+ * Note that this interface is intended to be batchable, meaning that
|
|
|
+ * ptep_modify_prot_commit may not actually update the pte, but merely
|
|
|
+ * queue the update to be done at some later time. The update must be
|
|
|
+ * actually committed before the pte lock is released, however.
|
|
|
+ */
|
|
|
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
|
|
|
+ unsigned long addr,
|
|
|
+ pte_t *ptep)
|
|
|
+{
|
|
|
+ return __ptep_modify_prot_start(mm, addr, ptep);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Commit an update to a pte, leaving any hardware-controlled bits in
|
|
|
+ * the PTE unmodified.
|
|
|
+ */
|
|
|
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
|
|
|
+ unsigned long addr,
|
|
|
+ pte_t *ptep, pte_t pte)
|
|
|
+{
|
|
|
+ __ptep_modify_prot_commit(mm, addr, ptep, pte);
|
|
|
+}
|
|
|
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
|
|
|
+
|
|
|
/*
|
|
|
* A facility to provide lazy MMU batching. This allows PTE updates and
|
|
|
* page invalidations to be delayed until a call to leave lazy MMU mode
|