@@ -31,9 +31,9 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
-#include <linux/threads.h>
 
 struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
 struct mm_struct;
@@ -597,31 +597,31 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-#define page_test_and_clear_dirty(_page) \
-({ \
-	struct page *__page = (_page); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	int __skey = page_get_storage_key(__physpage); \
-	if (__skey & _PAGE_CHANGED) \
-		page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\
-	(__skey & _PAGE_CHANGED); \
-})
+static inline int page_test_and_clear_dirty(struct page *page)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	int skey = page_get_storage_key(physpage);
+
+	if (skey & _PAGE_CHANGED)
+		page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
+	return skey & _PAGE_CHANGED;
+}
 
 /*
  * Test and clear referenced bit in storage key.
  */
-#define page_test_and_clear_young(page) \
-({ \
-	struct page *__page = (page); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);\
-	int __ccode; \
-	asm volatile( \
-		" rrbe 0,%1\n" \
-		" ipm %0\n" \
-		" srl %0,28\n" \
-		: "=d" (__ccode) : "a" (__physpage) : "cc"); \
-	(__ccode & 2); \
-})
+static inline int page_test_and_clear_young(struct page *page)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+	int ccode;
+
+	asm volatile (
+		"rrbe 0,%1\n"
+		"ipm %0\n"
+		"srl %0,28\n"
+		: "=d" (ccode) : "a" (physpage) : "cc" );
+	return ccode & 2;
+}
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -634,32 +634,28 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 	return __pte;
 }
 
-#define mk_pte(pg, pgprot) \
-({ \
-	struct page *__page = (pg); \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
-	__pte; \
-})
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
 
-#define pfn_pte(pfn, pgprot) \
-({ \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
-	pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
-	__pte; \
-})
+	return mk_pte_phys(physpage, pgprot);
+}
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return mk_pte_phys(physpage, pgprot);
+}
 
 #ifdef __s390x__
 
-#define pfn_pmd(pfn, pgprot) \
-({ \
-	pgprot_t __pgprot = (pgprot); \
-	unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
-	pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \
-	__pmd; \
-})
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
+{
+	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+	return __pmd(physpage + pgprot_val(pgprot));
+}
 
 #endif /* __s390x__ */
 
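
For readers unfamiliar with the pattern in the hunks above (GNU statement-expression macros replaced by static inline functions of the same name), the stand-alone sketch below shows the same conversion outside the kernel. It is illustration only, not part of the patch; the names test_and_clear_dirty_macro, test_and_clear_dirty_inline and PAGE_DIRTY_FLAG are made up for this example and need only a GCC-compatible compiler, no kernel headers.

/*
 * Minimal sketch of the macro-to-inline conversion pattern.
 * Both variants behave the same here; the inline version has a real
 * C prototype, so argument types are checked against the declaration
 * and the function is visible to debuggers.
 */
#include <stdio.h>

struct page { unsigned long flags; };

#define PAGE_DIRTY_FLAG 0x1UL

/* Old style: GNU statement-expression macro. */
#define test_and_clear_dirty_macro(_page) \
({ \
	struct page *__page = (_page); \
	int __was_dirty = (__page->flags & PAGE_DIRTY_FLAG) != 0; \
	__page->flags &= ~PAGE_DIRTY_FLAG; \
	__was_dirty; \
})

/* New style: static inline function with an ordinary prototype. */
static inline int test_and_clear_dirty_inline(struct page *page)
{
	int was_dirty = (page->flags & PAGE_DIRTY_FLAG) != 0;

	page->flags &= ~PAGE_DIRTY_FLAG;
	return was_dirty;
}

int main(void)
{
	struct page p = { .flags = PAGE_DIRTY_FLAG };

	/* First call sees the dirty bit set and clears it... */
	printf("macro:  %d then %d\n",
	       test_and_clear_dirty_macro(&p),
	       test_and_clear_dirty_macro(&p));

	p.flags = PAGE_DIRTY_FLAG;
	/* ...and the inline version does exactly the same. */
	printf("inline: %d then %d\n",
	       test_and_clear_dirty_inline(&p),
	       test_and_clear_dirty_inline(&p));
	return 0;
}

The generated code is the same once the inline is expanded; the gains are a checked prototype, no macro-expansion surprises, and no need for the __-prefixed local names, which is the usual motivation for this kind of conversion.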