/*
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
  12. #ifndef __ASM_SH_PGTABLE_H
  13. #define __ASM_SH_PGTABLE_H
  14. #include <asm-generic/pgtable-nopmd.h>
  15. #include <asm/page.h>
  16. #ifndef __ASSEMBLY__
  17. #include <asm/addrspace.h>
  18. #include <asm/fixmap.h>
  19. /*
  20. * ZERO_PAGE is a global shared page that is always zero: used
  21. * for zero-mapped memory areas etc..
  22. */
  23. extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  24. #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
  25. #endif /* !__ASSEMBLY__ */
  26. /*
  27. * Effective and physical address definitions, to aid with sign
  28. * extension.
  29. */
  30. #define NEFF 32
  31. #define NEFF_SIGN (1LL << (NEFF - 1))
  32. #define NEFF_MASK (-1LL << NEFF)
  33. static inline unsigned long long neff_sign_extend(unsigned long val)
  34. {
  35. unsigned long long extended = val;
  36. return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
  37. }
  38. #ifdef CONFIG_29BIT
  39. #define NPHYS 29
  40. #else
  41. #define NPHYS 32
  42. #endif
  43. #define NPHYS_SIGN (1LL << (NPHYS - 1))
  44. #define NPHYS_MASK (-1LL << NPHYS)
  45. /*
  46. * traditional two-level paging structure
  47. */
  48. /* PTE bits */
  49. #if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
  50. # define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
  51. #else
  52. # define PTE_MAGNITUDE 2 /* 32-bit PTEs */
  53. #endif
  54. #define PTE_SHIFT PAGE_SHIFT
  55. #define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
  56. /* PGD bits */
  57. #define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
  58. #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  59. #define PGDIR_MASK (~(PGDIR_SIZE-1))
  60. /* Entries per level */
  61. #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
  62. #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
  63. #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
  64. #define FIRST_USER_ADDRESS 0
  65. #define PHYS_ADDR_MASK29 0x1fffffff
  66. #define PHYS_ADDR_MASK32 0xffffffff
  67. #ifdef CONFIG_PMB
  68. static inline unsigned long phys_addr_mask(void)
  69. {
  70. /* Is the MMU in 29bit mode? */
  71. if (__in_29bit_mode())
  72. return PHYS_ADDR_MASK29;
  73. return PHYS_ADDR_MASK32;
  74. }
  75. #elif defined(CONFIG_32BIT)
  76. static inline unsigned long phys_addr_mask(void)
  77. {
  78. return PHYS_ADDR_MASK32;
  79. }
  80. #else
  81. static inline unsigned long phys_addr_mask(void)
  82. {
  83. return PHYS_ADDR_MASK29;
  84. }
  85. #endif
  86. #define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
  87. #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
  88. #ifdef CONFIG_SUPERH32
  89. #define VMALLOC_START (P3SEG)
  90. #else
  91. #define VMALLOC_START (0xf0000000)
  92. #endif
  93. #define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  94. #if defined(CONFIG_SUPERH32)
  95. #include <asm/pgtable_32.h>
  96. #else
  97. #include <asm/pgtable_64.h>
  98. #endif
  99. /*
  100. * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
  101. * protection for execute, and considers it the same as a read. Also, write
  102. * permission implies read permission. This is the closest we can get..
  103. *
  104. * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
  105. * not only supporting separate execute, read, and write bits, but having
  106. * completely separate permission bits for user and kernel space.
  107. */
  108. /*xwr*/
  109. #define __P000 PAGE_NONE
  110. #define __P001 PAGE_READONLY
  111. #define __P010 PAGE_COPY
  112. #define __P011 PAGE_COPY
  113. #define __P100 PAGE_EXECREAD
  114. #define __P101 PAGE_EXECREAD
  115. #define __P110 PAGE_COPY
  116. #define __P111 PAGE_COPY
  117. #define __S000 PAGE_NONE
  118. #define __S001 PAGE_READONLY
  119. #define __S010 PAGE_WRITEONLY
  120. #define __S011 PAGE_SHARED
  121. #define __S100 PAGE_EXECREAD
  122. #define __S101 PAGE_EXECREAD
  123. #define __S110 PAGE_RWX
  124. #define __S111 PAGE_RWX
  125. typedef pte_t *pte_addr_t;
  126. #define kern_addr_valid(addr) (1)
  127. #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
  128. remap_pfn_range(vma, vaddr, pfn, size, prot)
  129. #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
  130. /*
  131. * No page table caches to initialise
  132. */
  133. #define pgtable_cache_init() do { } while (0)
  134. struct vm_area_struct;
  135. extern void __update_cache(struct vm_area_struct *vma,
  136. unsigned long address, pte_t pte);
  137. extern void __update_tlb(struct vm_area_struct *vma,
  138. unsigned long address, pte_t pte);
  139. static inline void
  140. update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
  141. {
  142. __update_cache(vma, address, pte);
  143. __update_tlb(vma, address, pte);
  144. }
  145. extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  146. extern void paging_init(void);
  147. extern void page_table_range_init(unsigned long start, unsigned long end,
  148. pgd_t *pgd);
  149. /* arch/sh/mm/mmap.c */
  150. #define HAVE_ARCH_UNMAPPED_AREA
  151. #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
  152. #include <asm-generic/pgtable.h>
  153. #endif /* __ASM_SH_PGTABLE_H */