- /*
- * S390 version
- * Copyright IBM Corp. 1999, 2000
- * Author(s): Hartmut Penner (hp@de.ibm.com)
- * Ulrich Weigand (weigand@de.ibm.com)
- * Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Derived from "include/asm-i386/pgtable.h"
- */
- #ifndef _ASM_S390_PGTABLE_H
- #define _ASM_S390_PGTABLE_H
- /*
- * The Linux memory management assumes a three-level page table setup. For
- * s390 31 bit we "fold" the mid level into the top-level page table, so
- * that we physically have the same two-level page table as the s390 mmu
- * expects in 31 bit mode. For s390 64 bit we use three of the five levels
- * the hardware provides (region first and region second tables are not
- * used).
- *
- * The "pgd_xxx()" functions are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- *
- * This file contains the functions and defines necessary to modify and use
- * the S390 page table tree.
- */
- #ifndef __ASSEMBLY__
- #include <linux/sched.h>
- #include <linux/mm_types.h>
- #include <asm/bug.h>
- #include <asm/page.h>
- extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
- extern void paging_init(void);
- extern void vmem_map_init(void);
- extern void fault_init(void);
- /*
- * The S390 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
- #define update_mmu_cache(vma, address, ptep) do { } while (0)
- /*
- * ZERO_PAGE is a global shared page that is always zero; used
- * for zero-mapped memory areas etc..
- */
- extern unsigned long empty_zero_page;
- extern unsigned long zero_page_mask;
- #define ZERO_PAGE(vaddr) \
- (virt_to_page((void *)(empty_zero_page + \
- (((unsigned long)(vaddr)) & zero_page_mask))))
- #define is_zero_pfn is_zero_pfn
- static inline int is_zero_pfn(unsigned long pfn)
- {
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
- }
- #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
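- /*
- * Example (illustrative sketch): ZERO_PAGE adds the masked virtual
- * address to empty_zero_page, so different faulting addresses may
- * resolve to different prezeroed pages:
- *
- *	struct page *zp = ZERO_PAGE(vaddr);
- *	unsigned long zpfn = my_zero_pfn(vaddr);
- */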
- #endif /* !__ASSEMBLY__ */
- /*
- * PMD_SHIFT determines the size of the area a second-level page
- * table can map
- * PGDIR_SHIFT determines what a third-level page table entry can map
- */
- #ifndef CONFIG_64BIT
- # define PMD_SHIFT 20
- # define PUD_SHIFT 20
- # define PGDIR_SHIFT 20
- #else /* CONFIG_64BIT */
- # define PMD_SHIFT 20
- # define PUD_SHIFT 31
- # define PGDIR_SHIFT 42
- #endif /* CONFIG_64BIT */
- #define PMD_SIZE (1UL << PMD_SHIFT)
- #define PMD_MASK (~(PMD_SIZE-1))
- #define PUD_SIZE (1UL << PUD_SHIFT)
- #define PUD_MASK (~(PUD_SIZE-1))
- #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
- #define PGDIR_MASK (~(PGDIR_SIZE-1))
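- /*
- * Worked out, the shifts above give these mapping sizes: a pmd (segment)
- * entry covers 1MB (1UL << 20); on 64 bit a pud (region third) entry
- * covers 2GB (1UL << 31) and a pgd (region second) entry covers 4TB
- * (1UL << 42). On 31 bit all three shifts are 20, so every level covers
- * the same 1MB and the upper levels are folded away.
- */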
- /*
- * entries per page directory level: the S390 is two-level, so
- * we don't really have any PMD directory physically.
- * for S390, segment-table entries are combined into one PGD,
- * which leads to 1024 PTEs per PGD
- */
- #define PTRS_PER_PTE 256
- #ifndef CONFIG_64BIT
- #define PTRS_PER_PMD 1
- #define PTRS_PER_PUD 1
- #else /* CONFIG_64BIT */
- #define PTRS_PER_PMD 2048
- #define PTRS_PER_PUD 2048
- #endif /* CONFIG_64BIT */
- #define PTRS_PER_PGD 2048
- #define FIRST_USER_ADDRESS 0
- #define pte_ERROR(e) \
- printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
- #define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
- #define pud_ERROR(e) \
- printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
- #define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
- #ifndef __ASSEMBLY__
- /*
- * The vmalloc area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
- * which should be enough for any sane case.
- * By putting vmalloc at the top, we maximise the gap between physical
- * memory and vmalloc to catch misplaced memory accesses. As a side
- * effect, this also makes sure that 64 bit module code cannot be used
- * as system call address.
- */
- extern unsigned long VMALLOC_START;
- extern unsigned long VMALLOC_END;
- extern struct page *vmemmap;
- #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
- /*
- * A 31 bit pagetable entry of S390 has following format:
- *  |   PFRA          |    |  OS  |
- * 0                   0IP0
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Page-Invalid Bit: Page is not available for address-translation
- * P Page-Protection Bit: Store access not possible for page
- *
- * A 31 bit segmenttable entry of S390 has following format:
- *  |   P-table origin      |  |PTL
- * 0                         IC
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Segment-Invalid Bit: Segment is not available for address-translation
- * C Common-Segment Bit: Segment is not private (PoP 3-30)
- * PTL Page-Table-Length: Page-table length ((PTL+1)*16 entries -> up to 256)
- *
- * The 31 bit segmenttable origin of S390 has following format:
- *
- *  |S-table origin   |     | STL |
- * X                   **GPS
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * X Space-Switch event:
- * G Segment-Invalid Bit: *
- * P Private-Space Bit:
- * S Storage-Alteration:
- * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
- *
- * A 64 bit pagetable entry of S390 has following format:
- * |                        PFRA                        |0IPC|  OS  |
- * 0000000000111111111122222222223333333333444444444455555555556666
- * 0123456789012345678901234567890123456789012345678901234567890123
- *
- * I Page-Invalid Bit: Page is not available for address-translation
- * P Page-Protection Bit: Store access not possible for page
- * C Change-bit override: HW is not required to set change bit
- *
- * A 64 bit segmenttable entry of S390 has following format:
- * |        P-table origin                              |      TT
- * 0000000000111111111122222222223333333333444444444455555555556666
- * 0123456789012345678901234567890123456789012345678901234567890123
- *
- * I Segment-Invalid Bit: Segment is not available for address-translation
- * C Common-Segment Bit: Segment is not private (PoP 3-30)
- * P Page-Protection Bit: Store access not possible for page
- * TT Type 00
- *
- * A 64 bit region table entry of S390 has following format:
- * |      S-table origin                             |   TF  TTTL
- * 0000000000111111111122222222223333333333444444444455555555556666
- * 0123456789012345678901234567890123456789012345678901234567890123
- *
- * I Segment-Invalid Bit: Segment is not available for address-translation
- * TT Type 01
- * TF
- * TL Table length
- *
- * The 64 bit regiontable origin of S390 has following format:
- * |      region table origin                          |  DTTL
- * 0000000000111111111122222222223333333333444444444455555555556666
- * 0123456789012345678901234567890123456789012345678901234567890123
- *
- * X Space-Switch event:
- * G Segment-Invalid Bit:
- * P Private-Space Bit:
- * S Storage-Alteration:
- * R Real space
- * TL Table-Length:
- *
- * A storage key has the following format:
- * | ACC |F|R|C|0|
- * 0 3 4 5 6 7
- * ACC: access key
- * F : fetch protection bit
- * R : referenced bit
- * C : changed bit
- */
- /* Hardware bits in the page table entry */
- #define _PAGE_CO 0x100 /* HW Change-bit override */
- #define _PAGE_RO 0x200 /* HW read-only bit */
- #define _PAGE_INVALID 0x400 /* HW invalid bit */
- /* Software bits in the page table entry */
- #define _PAGE_SWT 0x001 /* SW pte type bit t */
- #define _PAGE_SWX 0x002 /* SW pte type bit x */
- #define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
- #define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
- #define _PAGE_SPECIAL 0x010 /* SW associated with special page */
- #define __HAVE_ARCH_PTE_SPECIAL
- /* Set of bits not changed in pte_modify */
- #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
- /* Six different types of pages. */
- #define _PAGE_TYPE_EMPTY 0x400
- #define _PAGE_TYPE_NONE 0x401
- #define _PAGE_TYPE_SWAP 0x403
- #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
- #define _PAGE_TYPE_RO 0x200
- #define _PAGE_TYPE_RW 0x000
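- /*
- * The type values above are simply combinations of the hardware and
- * software bits, e.g.:
- *	_PAGE_TYPE_NONE 0x401 == _PAGE_INVALID | _PAGE_SWT
- *	_PAGE_TYPE_SWAP 0x403 == _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX
- *	_PAGE_TYPE_FILE 0x601 == _PAGE_INVALID | _PAGE_RO | _PAGE_SWT
- */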
- /*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
- #define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */
- #define _HPAGE_TYPE_NONE 0x220
- #define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */
- #define _HPAGE_TYPE_RW 0x000
- /*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses the ipte instruction to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
- *
- * irxt ipte irxt
- * _PAGE_TYPE_EMPTY 1000 -> 1000
- * _PAGE_TYPE_NONE 1001 -> 1001
- * _PAGE_TYPE_SWAP 1011 -> 1011
- * _PAGE_TYPE_FILE 11?1 -> 11?1
- * _PAGE_TYPE_RO 0100 -> 1100
- * _PAGE_TYPE_RW 0000 -> 1000
- *
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
- */
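- /*
- * Worked example for the table above: a writable pte (_PAGE_TYPE_RW,
- * irxt = 0000) that ipte is invalidating becomes irxt = 1000; a
- * read-only pte (0100) becomes 1100. Both intermediate values are in
- * the pte_none set (1000, 1010, 1100, 1110), so a concurrent
- * handle_pte_fault never mistakes them for a swap or file pte.
- */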
- #ifndef CONFIG_64BIT
- /* Bits in the segment table address-space-control-element */
- #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
- #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
- #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
- #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
- #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
- /* Bits in the segment table entry */
- #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
- #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
- #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
- #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
- #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
- #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
- #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
- /* Page status table bits for virtualization */
- #define RCP_ACC_BITS 0xf0000000UL
- #define RCP_FP_BIT 0x08000000UL
- #define RCP_PCL_BIT 0x00800000UL
- #define RCP_HR_BIT 0x00400000UL
- #define RCP_HC_BIT 0x00200000UL
- #define RCP_GR_BIT 0x00040000UL
- #define RCP_GC_BIT 0x00020000UL
- /* User dirty / referenced bit for KVM's migration feature */
- #define KVM_UR_BIT 0x00008000UL
- #define KVM_UC_BIT 0x00004000UL
- #else /* CONFIG_64BIT */
- /* Bits in the segment/region table address-space-control-element */
- #define _ASCE_ORIGIN ~0xfffUL /* segment table origin */
- #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
- #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
- #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
- #define _ASCE_REAL_SPACE 0x20 /* real space control */
- #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
- #define _ASCE_TYPE_REGION1 0x0c /* region first table type */
- #define _ASCE_TYPE_REGION2 0x08 /* region second table type */
- #define _ASCE_TYPE_REGION3 0x04 /* region third table type */
- #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
- #define _ASCE_TABLE_LENGTH 0x03 /* region table length */
- /* Bits in the region table entry */
- #define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */
- #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
- #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
- #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
- #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
- #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
- #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
- #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
- #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
- #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
- #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
- #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
- #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
- /* Bits in the segment table entry */
- #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL /* segment table origin */
- #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
- #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
- #define _SEGMENT_ENTRY (0)
- #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
- #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
- #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
- /* Page status table bits for virtualization */
- #define RCP_ACC_BITS 0xf000000000000000UL
- #define RCP_FP_BIT 0x0800000000000000UL
- #define RCP_PCL_BIT 0x0080000000000000UL
- #define RCP_HR_BIT 0x0040000000000000UL
- #define RCP_HC_BIT 0x0020000000000000UL
- #define RCP_GR_BIT 0x0004000000000000UL
- #define RCP_GC_BIT 0x0002000000000000UL
- /* User dirty / referenced bit for KVM's migration feature */
- #define KVM_UR_BIT 0x0000800000000000UL
- #define KVM_UC_BIT 0x0000400000000000UL
- #endif /* CONFIG_64BIT */
- /*
- * A user page table pointer has the space-switch-event bit, the
- * private-space-control bit and the storage-alteration-event-control
- * bit set. A kernel page table pointer doesn't need them.
- */
- #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
- _ASCE_ALT_EVENT)
- /*
- * Page protection definitions.
- */
- #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
- #define PAGE_RO __pgprot(_PAGE_TYPE_RO)
- #define PAGE_RW __pgprot(_PAGE_TYPE_RW)
- #define PAGE_KERNEL PAGE_RW
- #define PAGE_COPY PAGE_RO
- /*
- * On s390 the page table entry has an invalid bit and a read-only bit.
- * Read permission implies execute permission and write permission
- * implies read permission.
- */
- /*xwr*/
- #define __P000 PAGE_NONE
- #define __P001 PAGE_RO
- #define __P010 PAGE_RO
- #define __P011 PAGE_RO
- #define __P100 PAGE_RO
- #define __P101 PAGE_RO
- #define __P110 PAGE_RO
- #define __P111 PAGE_RO
- #define __S000 PAGE_NONE
- #define __S001 PAGE_RO
- #define __S010 PAGE_RW
- #define __S011 PAGE_RW
- #define __S100 PAGE_RO
- #define __S101 PAGE_RO
- #define __S110 PAGE_RW
- #define __S111 PAGE_RW
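- /*
- * Example: a private PROT_READ|PROT_WRITE mapping starts out with
- * __P011 == PAGE_RO, so the first store faults and triggers copy on
- * write; only the shared variant __S011 maps the page PAGE_RW directly.
- */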
- static inline int mm_exclusive(struct mm_struct *mm)
- {
- return likely(mm == current->active_mm &&
- atomic_read(&mm->context.attach_count) <= 1);
- }
- static inline int mm_has_pgste(struct mm_struct *mm)
- {
- #ifdef CONFIG_PGSTE
- if (unlikely(mm->context.has_pgste))
- return 1;
- #endif
- return 0;
- }
- /*
- * pgd/pmd/pte query functions
- */
- #ifndef CONFIG_64BIT
- static inline int pgd_present(pgd_t pgd) { return 1; }
- static inline int pgd_none(pgd_t pgd) { return 0; }
- static inline int pgd_bad(pgd_t pgd) { return 0; }
- static inline int pud_present(pud_t pud) { return 1; }
- static inline int pud_none(pud_t pud) { return 0; }
- static inline int pud_bad(pud_t pud) { return 0; }
- #else /* CONFIG_64BIT */
- static inline int pgd_present(pgd_t pgd)
- {
- if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
- return 1;
- return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
- }
- static inline int pgd_none(pgd_t pgd)
- {
- if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
- return 0;
- return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
- }
- static inline int pgd_bad(pgd_t pgd)
- {
- /*
- * With dynamic page table levels the pgd can be a region table
- * entry or a segment table entry. Check for the bits that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pgd_val(pgd) & mask) != 0;
- }
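- /*
- * Worked out from the defines above, the mask is
- * 0x7ff & ~0x20 & ~0x0c & ~0x03 = 0x7d0; pud_bad below uses the
- * same value.
- */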
- static inline int pud_present(pud_t pud)
- {
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
- return 1;
- return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
- }
- static inline int pud_none(pud_t pud)
- {
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
- return 0;
- return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
- }
- static inline int pud_bad(pud_t pud)
- {
- /*
- * With dynamic page table levels the pud can be a region table
- * entry or a segment table entry. Check for the bits that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pud_val(pud) & mask) != 0;
- }
- #endif /* CONFIG_64BIT */
- static inline int pmd_present(pmd_t pmd)
- {
- return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
- }
- static inline int pmd_none(pmd_t pmd)
- {
- return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
- }
- static inline int pmd_bad(pmd_t pmd)
- {
- unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
- return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
- }
- static inline int pte_none(pte_t pte)
- {
- return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
- }
- static inline int pte_present(pte_t pte)
- {
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
- return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
- (!(pte_val(pte) & _PAGE_INVALID) &&
- !(pte_val(pte) & _PAGE_SWT));
- }
- static inline int pte_file(pte_t pte)
- {
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
- return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
- }
- static inline int pte_special(pte_t pte)
- {
- return (pte_val(pte) & _PAGE_SPECIAL);
- }
- #define __HAVE_ARCH_PTE_SAME
- static inline int pte_same(pte_t a, pte_t b)
- {
- return pte_val(a) == pte_val(b);
- }
- static inline pgste_t pgste_get_lock(pte_t *ptep)
- {
- unsigned long new = 0;
- #ifdef CONFIG_PGSTE
- unsigned long old;
- preempt_disable();
- asm(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */
- " oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */
- " csg %0,%1,%2\n"
- " jl 0b\n"
- : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
- : "Q" (ptep[PTRS_PER_PTE]) : "cc");
- #endif
- return __pgste(new);
- }
- static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
- {
- #ifdef CONFIG_PGSTE
- asm(
- " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
- " stg %1,%0\n"
- : "=Q" (ptep[PTRS_PER_PTE])
- : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
- preempt_enable();
- #endif
- }
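- /*
- * Typical use of the lock pair above (a sketch of the pattern used by
- * set_pte_at and the ptep_* functions below); the pgste itself lives
- * at ptep + PTRS_PER_PTE:
- *
- *	pgste = pgste_get_lock(ptep);
- *	... modify the pte and/or the pgste ...
- *	pgste_set_unlock(ptep, pgste);
- */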
- static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
- {
- #ifdef CONFIG_PGSTE
- unsigned long address, bits;
- unsigned char skey;
- if (!pte_present(*ptep))
- return pgste;
- address = pte_val(*ptep) & PAGE_MASK;
- skey = page_get_storage_key(address);
- bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
- /* Clear page changed & referenced bit in the storage key */
- if (bits & _PAGE_CHANGED)
- page_set_storage_key(address, skey ^ bits, 1);
- else if (bits)
- page_reset_referenced(address);
- /* Transfer page changed & referenced bit to guest bits in pgste */
- pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
- /* Get host changed & referenced bits from pgste */
- bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
- /* Clear host bits in pgste. */
- pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
- pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
- /* Copy page access key and fetch protection bit to pgste */
- pgste_val(pgste) |=
- (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
- /* Transfer changed and referenced to kvm user bits */
- pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
- /* Transfer changed & referenced to pte software bits */
- pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
- #endif
- return pgste;
- }
- static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
- {
- #ifdef CONFIG_PGSTE
- int young;
- if (!pte_present(*ptep))
- return pgste;
- young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
- /* Transfer page referenced bit to pte software bit (host view) */
- if (young || (pgste_val(pgste) & RCP_HR_BIT))
- pte_val(*ptep) |= _PAGE_SWR;
- /* Clear host referenced bit in pgste. */
- pgste_val(pgste) &= ~RCP_HR_BIT;
- /* Transfer page referenced bit to guest bit in pgste */
- pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
- #endif
- return pgste;
- }
- static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
- {
- #ifdef CONFIG_PGSTE
- unsigned long address;
- unsigned long okey, nkey;
- if (!pte_present(entry))
- return;
- address = pte_val(entry) & PAGE_MASK;
- okey = nkey = page_get_storage_key(address);
- nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
- /* Set page access key and fetch protection bit from pgste */
- nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
- if (okey != nkey)
- page_set_storage_key(address, nkey, 1);
- #endif
- }
- /**
- * struct gmap_struct - guest address space
- * @mm: pointer to the parent mm_struct
- * @table: pointer to the page directory
- * @asce: address space control element for gmap page table
- * @crst_list: list of all crst tables used in the guest address space
- */
- struct gmap {
- struct list_head list;
- struct mm_struct *mm;
- unsigned long *table;
- unsigned long asce;
- struct list_head crst_list;
- };
- /**
- * struct gmap_rmap - reverse mapping for segment table entries
- * @next: pointer to the next gmap_rmap structure in the list
- * @entry: pointer to a segment table entry
- */
- struct gmap_rmap {
- struct list_head list;
- unsigned long *entry;
- };
- /**
- * struct gmap_pgtable - gmap information attached to a page table
- * @vmaddr: address of the 1MB segment in the process virtual memory
- * @mapper: list of segment table entries mapping a page table
- */
- struct gmap_pgtable {
- unsigned long vmaddr;
- struct list_head mapper;
- };
- struct gmap *gmap_alloc(struct mm_struct *mm);
- void gmap_free(struct gmap *gmap);
- void gmap_enable(struct gmap *gmap);
- void gmap_disable(struct gmap *gmap);
- int gmap_map_segment(struct gmap *gmap, unsigned long from,
- unsigned long to, unsigned long length);
- int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
- unsigned long __gmap_fault(unsigned long address, struct gmap *);
- unsigned long gmap_fault(unsigned long address, struct gmap *);
- void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
- /*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
- static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
- {
- pgste_t pgste;
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste_set_pte(ptep, pgste, entry);
- *ptep = entry;
- pgste_set_unlock(ptep, pgste);
- } else
- *ptep = entry;
- }
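- /*
- * Illustrative use (a sketch, assuming a page and a pte pointer are
- * at hand): establishing a writable mapping would look like
- *
- *	pte_t entry = mk_pte(page, PAGE_RW);
- *	set_pte_at(vma->vm_mm, address, ptep, entry);
- */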
- /*
- * query functions pte_write/pte_dirty/pte_young only work if
- * pte_present() is true. Undefined behaviour if not..
- */
- static inline int pte_write(pte_t pte)
- {
- return (pte_val(pte) & _PAGE_RO) == 0;
- }
- static inline int pte_dirty(pte_t pte)
- {
- #ifdef CONFIG_PGSTE
- if (pte_val(pte) & _PAGE_SWC)
- return 1;
- #endif
- return 0;
- }
- static inline int pte_young(pte_t pte)
- {
- #ifdef CONFIG_PGSTE
- if (pte_val(pte) & _PAGE_SWR)
- return 1;
- #endif
- return 0;
- }
- /*
- * pgd/pmd/pte modification functions
- */
- static inline void pgd_clear(pgd_t *pgd)
- {
- #ifdef CONFIG_64BIT
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
- #endif
- }
- static inline void pud_clear(pud_t *pud)
- {
- #ifdef CONFIG_64BIT
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pud_val(*pud) = _REGION3_ENTRY_EMPTY;
- #endif
- }
- static inline void pmd_clear(pmd_t *pmdp)
- {
- pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
- }
- static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
- }
- /*
- * The following pte modification functions only work if
- * pte_present() is true. Undefined behaviour if not..
- */
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- {
- pte_val(pte) &= _PAGE_CHG_MASK;
- pte_val(pte) |= pgprot_val(newprot);
- return pte;
- }
- static inline pte_t pte_wrprotect(pte_t pte)
- {
- /* Do not clobber _PAGE_TYPE_NONE pages! */
- if (!(pte_val(pte) & _PAGE_INVALID))
- pte_val(pte) |= _PAGE_RO;
- return pte;
- }
- static inline pte_t pte_mkwrite(pte_t pte)
- {
- pte_val(pte) &= ~_PAGE_RO;
- return pte;
- }
- static inline pte_t pte_mkclean(pte_t pte)
- {
- #ifdef CONFIG_PGSTE
- pte_val(pte) &= ~_PAGE_SWC;
- #endif
- return pte;
- }
- static inline pte_t pte_mkdirty(pte_t pte)
- {
- return pte;
- }
- static inline pte_t pte_mkold(pte_t pte)
- {
- #ifdef CONFIG_PGSTE
- pte_val(pte) &= ~_PAGE_SWR;
- #endif
- return pte;
- }
- static inline pte_t pte_mkyoung(pte_t pte)
- {
- return pte;
- }
- static inline pte_t pte_mkspecial(pte_t pte)
- {
- pte_val(pte) |= _PAGE_SPECIAL;
- return pte;
- }
- #ifdef CONFIG_HUGETLB_PAGE
- static inline pte_t pte_mkhuge(pte_t pte)
- {
- /*
- * PROT_NONE needs to be remapped from the pte type to the ste type.
- * The HW invalid bit is also different for pte and ste. The pte
- * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
- * bit, so we don't have to clear it.
- */
- if (pte_val(pte) & _PAGE_INVALID) {
- if (pte_val(pte) & _PAGE_SWT)
- pte_val(pte) |= _HPAGE_TYPE_NONE;
- pte_val(pte) |= _SEGMENT_ENTRY_INV;
- }
- /*
- * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
- * table entry.
- */
- pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
- /*
- * Also set the change-override bit because we don't need dirty bit
- * tracking for hugetlbfs pages.
- */
- pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
- return pte;
- }
- #endif
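- /*
- * Worked trace for pte_mkhuge: a _PAGE_TYPE_NONE pte (0x401) has
- * _PAGE_INVALID and _PAGE_SWT set, so it picks up _HPAGE_TYPE_NONE and
- * _SEGMENT_ENTRY_INV (0x401 -> 0x621), loses SWT/SWX (-> 0x620) and
- * gains LARGE|CO (-> 0x720), which still carries the 0x220 PROT_NONE
- * type at segment level.
- */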
- /*
- * Get (and clear) the user dirty bit for a pte.
- */
- static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
- pte_t *ptep)
- {
- pgste_t pgste;
- int dirty = 0;
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_update_all(ptep, pgste);
- dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
- pgste_val(pgste) &= ~KVM_UC_BIT;
- pgste_set_unlock(ptep, pgste);
- }
- return dirty;
- }
- /*
- * Get (and clear) the user referenced bit for a pte.
- */
- static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
- pte_t *ptep)
- {
- pgste_t pgste;
- int young = 0;
- if (mm_has_pgste(mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_update_young(ptep, pgste);
- young = !!(pgste_val(pgste) & KVM_UR_BIT);
- pgste_val(pgste) &= ~KVM_UR_BIT;
- pgste_set_unlock(ptep, pgste);
- }
- return young;
- }
- #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
- static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
- {
- pgste_t pgste;
- pte_t pte;
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_get_lock(ptep);
- pgste = pgste_update_young(ptep, pgste);
- pte = *ptep;
- *ptep = pte_mkold(pte);
- pgste_set_unlock(ptep, pgste);
- return pte_young(pte);
- }
- return 0;
- }
- #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
- static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
- {
- /* No need to flush TLB;
- * on s390 the reference bits are in the storage key and never in the TLB.
- * With virtualization we handle the reference bit, without it we
- * can simply return */
- return ptep_test_and_clear_young(vma, address, ptep);
- }
- static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
- {
- if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- #ifndef CONFIG_64BIT
- /* pto must point to the start of the segment table */
- pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
- #else
- /* ipte in zarch mode can do the math */
- pte_t *pto = ptep;
- #endif
- asm volatile(
- " ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep),
- "a" (pto), "a" (address));
- }
- }
- /*
- * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
- * both clear the TLB for the unmapped pte. The reason is that
- * ptep_get_and_clear is used in common code (e.g. change_pte_range)
- * to modify an active pte. The sequence is
- * 1) ptep_get_and_clear
- * 2) set_pte_at
- * 3) flush_tlb_range
- * On s390 the tlb needs to get flushed with the modification of the pte
- * if the pte is active. The only way how this can be implemented is to
- * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
- * is a nop.
- */
- #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
- static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
- {
- pgste_t pgste;
- pte_t pte;
- mm->context.flush_mm = 1;
- if (mm_has_pgste(mm))
- pgste = pgste_get_lock(ptep);
- pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
- if (mm_has_pgste(mm)) {
- pgste = pgste_update_all(&pte, pgste);
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
- }
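- /*
- * The sequence from the comment above as code (sketch of the generic
- * change_pte_range logic, simplified):
- *
- *	pte = ptep_get_and_clear(mm, addr, ptep);	step 1, flushes tlb
- *	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));	step 2
- *	flush_tlb_range(vma, start, end);	step 3, nop on s390
- */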
- #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
- static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep)
- {
- pte_t pte;
- mm->context.flush_mm = 1;
- if (mm_has_pgste(mm))
- pgste_get_lock(ptep);
- pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
- return pte;
- }
- static inline void ptep_modify_prot_commit(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep, pte_t pte)
- {
- *ptep = pte;
- if (mm_has_pgste(mm))
- pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
- }
- #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
- static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
- {
- pgste_t pgste;
- pte_t pte;
- if (mm_has_pgste(vma->vm_mm))
- pgste = pgste_get_lock(ptep);
- pte = *ptep;
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
- if (mm_has_pgste(vma->vm_mm)) {
- pgste = pgste_update_all(&pte, pgste);
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
- }
- /*
- * The batched pte unmap code uses ptep_get_and_clear_full to clear the
- * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
- * tlbs of an mm if it can guarantee that the ptes of the mm_struct
- * cannot be accessed while the batched unmap is running. In this case
- * full==1 and a simple pte_clear is enough. See tlb.h.
- */
- #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
- static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep, int full)
- {
- pgste_t pgste;
- pte_t pte;
- if (mm_has_pgste(mm))
- pgste = pgste_get_lock(ptep);
- pte = *ptep;
- if (!full)
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
- if (mm_has_pgste(mm)) {
- pgste = pgste_update_all(&pte, pgste);
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
- }
- #define __HAVE_ARCH_PTEP_SET_WRPROTECT
- static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
- {
- pgste_t pgste;
- pte_t pte = *ptep;
- if (pte_write(pte)) {
- mm->context.flush_mm = 1;
- if (mm_has_pgste(mm))
- pgste = pgste_get_lock(ptep);
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
- *ptep = pte_wrprotect(pte);
- if (mm_has_pgste(mm))
- pgste_set_unlock(ptep, pgste);
- }
- return pte;
- }
- #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
- static inline int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty)
- {
- pgste_t pgste;
- if (pte_same(*ptep, entry))
- return 0;
- if (mm_has_pgste(vma->vm_mm))
- pgste = pgste_get_lock(ptep);
- __ptep_ipte(address, ptep);
- *ptep = entry;
- if (mm_has_pgste(vma->vm_mm))
- pgste_set_unlock(ptep, pgste);
- return 1;
- }
- /*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
- static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
- {
- pte_t __pte;
- pte_val(__pte) = physpage + pgprot_val(pgprot);
- return __pte;
- }
- static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
- {
- unsigned long physpage = page_to_phys(page);
- return mk_pte_phys(physpage, pgprot);
- }
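- /*
- * Example (illustrative): building a read-only pte for a page and
- * getting the page frame back out of it:
- *
- *	pte_t pte = mk_pte(page, PAGE_RO);
- *	struct page *same = pte_page(pte);
- */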
- #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
- #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
- #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
- #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
- #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
- #define pgd_offset_k(address) pgd_offset(&init_mm, address)
- #ifndef CONFIG_64BIT
- #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
- #define pud_deref(pmd) ({ BUG(); 0UL; })
- #define pgd_deref(pmd) ({ BUG(); 0UL; })
- #define pud_offset(pgd, address) ((pud_t *) pgd)
- #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
- #else /* CONFIG_64BIT */
- #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
- #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
- #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
- static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
- {
- pud_t *pud = (pud_t *) pgd;
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pud = (pud_t *) pgd_deref(*pgd);
- return pud + pud_index(address);
- }
- static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
- {
- pmd_t *pmd = (pmd_t *) pud;
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmd = (pmd_t *) pud_deref(*pud);
- return pmd + pmd_index(address);
- }
- #endif /* CONFIG_64BIT */
- #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
- #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
- #define pte_page(x) pfn_to_page(pte_pfn(x))
- #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
- /* Find an entry in the lowest level page table.. */
- #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
- #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
- #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
- #define pte_unmap(pte) do { } while (0)
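- /*
- * A full software walk with the helpers above (sketch; presence checks
- * with p?d_none/p?d_bad and locking are omitted):
- *
- *	pgd_t *pgd = pgd_offset(mm, addr);
- *	pud_t *pud = pud_offset(pgd, addr);
- *	pmd_t *pmd = pmd_offset(pud, addr);
- *	pte_t *pte = pte_offset_map(pmd, addr);
- *	... use *pte ...
- *	pte_unmap(pte);
- */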
- /*
- * 31 bit swap entry format:
- * A page-table entry has some bits we have to treat in a special way.
- * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
- * exception will occur instead of a page translation exception. The
- * specification exception has the bad habit not to store necessary
- * information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
- * This leaves the bits 1-19 and bits 24-29 to store type and offset.
- * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
- * plus 24 for the offset.
- * 0| offset |0110|o|type |00|
- * 0 0000000001111111111 2222 2 22222 33
- * 0 1234567890123456789 0123 4 56789 01
- *
- * 64 bit swap entry format:
- * A page-table entry has some bits we have to treat in a special way.
- * Bits 52 and bit 55 have to be zero, otherwise a specification
- * exception will occur instead of a page translation exception. The
- * specification exception has the bad habit not to store necessary
- * information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
- * This leaves the bits 0-51 and bits 56-61 to store type and offset.
- * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
- * plus 56 for the offset.
- * | offset |0110|o|type |00|
- * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
- * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
- */
- #ifndef CONFIG_64BIT
- #define __SWP_OFFSET_MASK (~0UL >> 12)
- #else
- #define __SWP_OFFSET_MASK (~0UL >> 11)
- #endif
- static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
- {
- pte_t pte;
- offset &= __SWP_OFFSET_MASK;
- pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
- ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
- return pte;
- }
- #define __swp_type(entry) (((entry).val >> 2) & 0x1f)
- #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
- #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
- #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
- #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
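- /*
- * Worked example for mk_swap_pte: type 3, offset 5 encodes to
- *	0x403 | (3 << 2) | ((5 & 1) << 7) | ((5 & ~1UL) << 11) = 0x248f
- * and decodes back: __swp_type = (0x248f >> 2) & 0x1f = 3,
- * __swp_offset = (0x248f >> 11) | ((0x248f >> 7) & 1) = 4 | 1 = 5.
- */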
- #ifndef CONFIG_64BIT
- # define PTE_FILE_MAX_BITS 26
- #else /* CONFIG_64BIT */
- # define PTE_FILE_MAX_BITS 59
- #endif /* CONFIG_64BIT */
- #define pte_to_pgoff(__pte) \
- ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
- #define pgoff_to_pte(__off) \
- ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
- | _PAGE_TYPE_FILE })
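- /*
- * Worked example for the file pte macros: pgoff 0x1ff encodes to
- *	((0x1ff & 0x7f) << 1) + ((0x1ff >> 7) << 12) | _PAGE_TYPE_FILE
- *	= 0x30fe | 0x601 = 0x36ff
- * and pte_to_pgoff recovers (3 << 7) + 0x7f = 0x1ff.
- */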
- #endif /* !__ASSEMBLY__ */
- #define kern_addr_valid(addr) (1)
- extern int vmem_add_mapping(unsigned long start, unsigned long size);
- extern int vmem_remove_mapping(unsigned long start, unsigned long size);
- extern int s390_enable_sie(void);
- /*
- * No page table caches to initialise
- */
- #define pgtable_cache_init() do { } while (0)
- #include <asm-generic/pgtable.h>
- #endif /* _ASM_S390_PGTABLE_H */