@@ -17,16 +17,6 @@

 #include <asm-generic/pgtable-nopud.h>

-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
 /*
  * Entries per page directory level. The PTE level must use a 64b record
  * for each page table entry. The PMD and PGD level use a 32b record for
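
The <asm-generic/pgtable-nopud.h> include above is what makes this cleanup possible: it folds the PUD level away for this three-level port, so the generic four-level page table code compiles against it unchanged. A standalone sketch of the folding idea, with simplified stand-in types rather than the kernel's own definitions:

/*
 * Standalone sketch (not part of the patch): on a 3-level port the PUD
 * level is folded, pud_t just wraps pgd_t, and pud_offset() hands the
 * pgd entry straight back, so generic 4-level walkers work unchanged.
 */
#include <assert.h>

typedef struct { unsigned int pgd; } pgd_t;
typedef struct { pgd_t pgd; } pud_t;	/* folded level: same storage */

#define PTRS_PER_PUD 1			/* a single implicit "entry" */

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	(void)address;			/* no index bits at this level */
	return (pud_t *)pgd;
}

int main(void)
{
	pgd_t pgd = { 42 };
	pud_t *pud = pud_offset(&pgd, 0x1234);

	/* Folding is transparent: the pud entry *is* the pgd entry. */
	assert(pud->pgd.pgd == 42);
	return 0;
}
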
@@ -40,40 +30,30 @@

 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)

-#define USER_PTRS_PER_PGD (1024)
-#define FIRST_USER_ADDRESS 0
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))

-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-		    PGD_INDEX_SIZE + PAGE_SHIFT)
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define FIRST_USER_ADDRESS 0

 /*
  * Size of EA range mapped by our pagetables.
  */
-#define PGTABLE_EA_BITS 41
-#define PGTABLE_EA_MASK ((1UL<<PGTABLE_EA_BITS)-1)
+#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+		    PGD_INDEX_SIZE + PAGE_SHIFT)
+#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)

 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_END (VMALLOC_START + PGTABLE_EA_MASK)
-
-/*
- * Define the address range of the imalloc VM area.
- * (used for ioremap)
- */
-#define IMALLOC_START (ioremap_bot)
-#define IMALLOC_VMADDR(x) ((unsigned long)(x))
-#define PHBS_IO_BASE (0xE000000000000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_BASE (0xE000000080000000ul)
-#define IMALLOC_END (IMALLOC_BASE + PGTABLE_EA_MASK)
-
-/*
- * Define the user address range
- */
-#define USER_START (0UL)
-#define USER_END (USER_START + PGTABLE_EA_MASK)
-
+#define VMALLOC_END (VMALLOC_START + EADDR_MASK)

 /*
  * Bits in a linux-style PTE. These match the bits in the
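
The rewritten shifts are numerically identical to the old hard-coded forms; they are just derived from the per-level index sizes now. A standalone sanity check, assuming the classic ppc64 4K-page geometry (the INDEX_SIZE values are not part of this hunk; they follow from the 64b-PTE / 32b-PMD-PGD comment above):

/*
 * Standalone sketch: verify the derived shifts for the assumed
 * geometry. Values are illustrative, not taken from the patch.
 */
#include <assert.h>

#define PAGE_SHIFT 12			/* 4K pages (assumed) */
#define PTE_INDEX_SIZE 9		/* 64b records: PAGE_SHIFT - 3 */
#define PMD_INDEX_SIZE 10		/* 32b records: PAGE_SHIFT - 2 */
#define PGD_INDEX_SIZE 10		/* 32b records: PAGE_SHIFT - 2 */

#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
		    PGD_INDEX_SIZE + PAGE_SHIFT)

int main(void)
{
	assert(PMD_SHIFT == 21);	/* a PMD entry maps 2MB */
	assert(PGDIR_SHIFT == 31);	/* a PGD entry maps 2GB */
	/* Agrees with the hard-coded PGTABLE_EA_BITS 41 being removed. */
	assert(EADDR_SIZE == 41);
	return 0;
}
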
@@ -168,10 +148,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* shift to put page number into pte */
 #define PTE_SHIFT (17)

-/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
- * to give the PTE page number. The bottom two bits are for flags. */
-#define PMD_TO_PTEPAGE_SHIFT (2)
-
 #ifdef CONFIG_HUGETLB_PAGE

 #ifndef __ASSEMBLY__
@@ -200,13 +176,14 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
  */
 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

-#define pfn_pte(pfn,pgprot) \
-({ \
-	pte_t pte; \
-	pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
-			pgprot_val(pgprot); \
-	pte; \
-})
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	pte_t pte;
+
+
+	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+	return pte;
+}

 #define pte_modify(_pte, newprot) \
 	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
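
Turning the pfn_pte() statement-expression macro into a static inline gives real type checking on both arguments, evaluates them exactly once, and lets the now-redundant cast go, with no change in the generated code. A standalone sketch of the same shape, with simplified pte_t/pgprot_t stand-ins rather than the kernel's own:

/*
 * Standalone sketch: the function prototype type-checks its arguments,
 * which the old macro could not. PTE_SHIFT and the wrapper types are
 * simplified stand-ins for illustration.
 */
#include <stdio.h>

#define PTE_SHIFT 17

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x)    ((x).pte)
#define pgprot_val(x) ((x).pgprot)

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

int main(void)
{
	pgprot_t prot = { 0x1 };		/* illustrative flag bits */
	pte_t pte = pfn_pte(0x1234, prot);

	/* pfn shifted into place, low bits carrying the flags */
	printf("pte = %#lx\n", pte_val(pte));
	return 0;
}
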
@@ -220,13 +197,12 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pte_page(x) pfn_to_page(pte_pfn(x))

 #define pmd_set(pmdp, ptep) \
-	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
+	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) == 0)
 #define pmd_present(pmd) (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) \
-	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
+#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
 #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))

 #define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
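
With PMD_TO_PTEPAGE_SHIFT gone (removed earlier in this patch), pmd_set() and pmd_page_kernel() become a direct round-trip through __ba_to_bpn()/__bpn_to_ba(). In the sketch below those two are modeled as a plain linear-map shift, an assumption for illustration only, not the kernel's actual conversion:

/*
 * Standalone sketch of the simplified PMD encoding: store the page
 * number, read the page number back, no flag-space shift in between.
 */
#include <assert.h>

#define PAGE_SHIFT 12
#define KERNELBASE 0xC000000000000000ul	/* illustrative linear-map base */

typedef struct { unsigned int pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)

#define __ba_to_bpn(ba) \
	((unsigned int)((((unsigned long)(ba)) - KERNELBASE) >> PAGE_SHIFT))
#define __bpn_to_ba(bpn) \
	((void *)((((unsigned long)(bpn)) << PAGE_SHIFT) + KERNELBASE))

#define pmd_set(pmdp, ptep)  (pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))

int main(void)
{
	pmd_t pmd;
	void *pte_page = (void *)(KERNELBASE + (42ul << PAGE_SHIFT));

	pmd_set(&pmd, pte_page);
	/* The stored page number must decode back to the same address. */
	assert(pmd_page_kernel(pmd) == pte_page);
	return 0;
}
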
@@ -266,8 +242,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 /* to find an entry in the ioremap page-table-directory */
 #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))

-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
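
The dropped pages_to_mb() is pure arithmetic: x pages of 2^PAGE_SHIFT bytes each, divided by the 2^20 bytes in a megabyte, is x >> (20 - PAGE_SHIFT). A quick standalone check, assuming 4K pages:

/* Standalone check of the dropped helper's arithmetic. */
#include <assert.h>

#define PAGE_SHIFT 12
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

int main(void)
{
	/* 256 4K pages make exactly one megabyte. */
	assert(pages_to_mb(256ul) == 1);
	assert(pages_to_mb(1ul << 20) == 4096);	/* 1M pages = 4GB */
	return 0;
}
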
@@ -442,7 +416,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 		flush_tlb_pending();
 	}
-	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
+	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }

 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
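
This hunk is a real fix, not a cleanup: the mask must be applied to the raw PTE value before it is wrapped back into a pte_t. Under STRICT_MM_TYPECHECKS, where pte_t is a one-member struct, the old form would not even compile (there is no operator& on a struct); in either configuration the intent is to strip the stale HPTE bookkeeping bits from the value being stored. A standalone illustration with stand-in types and an illustrative flag mask:

/*
 * Standalone sketch of the set_pte_at() fix. The struct-wrapped pte_t
 * mimics STRICT_MM_TYPECHECKS; the mask value is illustrative.
 */
#include <assert.h>

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x)   ((pte_t) { (x) })

#define _PAGE_HPTEFLAGS 0x3ful		/* illustrative HPTE-tracking bits */

int main(void)
{
	pte_t pte = __pte(0x24680000ul | 0x15ul); /* pfn bits + stale HPTE bits */
	pte_t stored = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);

	/* The stale HPTE bookkeeping bits are gone from the stored PTE. */
	assert((pte_val(stored) & _PAGE_HPTEFLAGS) == 0);
	assert(pte_val(stored) == 0x24680000ul);
	return 0;
}
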
@@ -487,18 +461,13 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,

 extern unsigned long ioremap_bot, ioremap_base;

-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t ioremap_dir[1024];
+extern pgd_t swapper_pg_dir[];
+extern pgd_t ioremap_dir[];

 extern void paging_init(void);

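Declaring the directory externs without a bound means only the definitions fix the array sizes; a hard-coded [1024] in the header would silently go stale if PTRS_PER_PGD ever disagreed. A standalone sketch of the pattern, with stand-in types:

/*
 * Standalone sketch: the definition is the single source of truth for
 * the array size; the header's unsized extern cannot lie about it.
 */
#include <stdio.h>

typedef struct { unsigned int pgd; } pgd_t;

#define PGD_INDEX_SIZE 10
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)

/* Definition, as the arch code would provide it. */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Header-style declaration: no bound to go stale. */
extern pgd_t swapper_pg_dir[];

int main(void)
{
	/* sizeof only works where the definition is visible. */
	printf("PGD entries: %zu\n",
	       sizeof(swapper_pg_dir) / sizeof(swapper_pg_dir[0]));
	return 0;
}
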
@@ -540,43 +509,11 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr) (1)

-#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
-	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)

-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
 void pgtable_cache_init(void);

-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-			int region_type);
-unsigned long im_free(void *addr);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-				     unsigned long va, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large);
-
-extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn, int secondary,
-			       unsigned long hpteflags, int bolted, int large);
-
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory. If not found, it returns zero.