#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/btfixup.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

#define pte_ERROR(e)		__builtin_trap()
#define pmd_ERROR(e)		__builtin_trap()
#define pgd_ERROR(e)		__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

extern void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
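
/*
 * Roughly how these tables are consumed: the generic mm code indexes
 * protection_map[] with the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of
 * vm_flags.  A private PROT_READ|PROT_WRITE mapping therefore lands on
 * __P011 == PAGE_COPY, so the first store faults and the page is copied
 * (copy-on-write), while the shared equivalent lands on __S011 ==
 * PAGE_SHARED and really is writable.
 */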

extern int num_contexts;

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t *__bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

/*
 * The following only work if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & SRMMU_FILE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
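
/*
 * A rough worked example of the address packing used above (illustrative
 * values): the SRMMU PTE keeps the physical address shifted right by 4, so
 * for a page at physical address 0x12345000 with PAGE_SHIFT == 12:
 *
 *	pte_t pte = mk_pte_phys(0x12345000UL, PAGE_KERNEL);
 *		// pte_val(pte) == (0x12345000 >> 4) | pgprot bits
 *		//              ==  0x01234500       | pgprot bits
 *	unsigned long pfn = pte_pfn(pte);
 *		// (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT - 4)
 *		//              ==  0x12345
 *
 * i.e. pte_pfn(mk_pte_phys(pa, prot)) recovers pa >> PAGE_SHIFT.
 */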

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)
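
/*
 * Usage sketch (illustrative only; assumes the caller already holds whatever
 * locks the mm layer requires): a software walk down to the PTE for a user
 * address using the helpers above:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd;
 *	pte_t *pte;
 *
 *	if (pgd_none(*pgd) || pgd_bad(*pgd))
 *		goto no_mapping;
 *	pmd = pmd_offset(pgd, addr);
 *	if (pmd_none(*pmd) || pmd_bad(*pmd))
 *		goto no_mapping;
 *	pte = pte_offset_map(pmd, addr);
 *	if (pte_present(*pte))
 *		... use pte_pfn(*pte), pte_write(*pte), etc. ...
 *	pte_unmap(pte);
 */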

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

extern int invalid_segment;

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
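
/*
 * Illustrative round trip (a sketch, assuming "type" and "offset" fit within
 * SRMMU_SWP_TYPE_MASK and SRMMU_SWP_OFF_MASK): a swapped-out PTE is just the
 * encoded entry reinterpreted, and decoding recovers the fields:
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);	// stored in the page table
 *	...
 *	ent = __pte_to_swp_entry(pte);
 *	// __swp_type(ent) == type, __swp_offset(ent) == offset
 *
 * By design the encoding leaves the entry non-present, so pte_present()
 * returns false for such a PTE.
 */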

/* file-offset-in-pte helpers */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static inline pte_t pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

/*
 * This is made a constant because mm/fremap.c required a constant.
 */
#define PTE_FILE_MAX_BITS 24
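
/*
 * A similar sketch for the nonlinear-file case: the file offset is stashed
 * in a non-present PTE with the SRMMU_FILE bit set, and PTE_FILE_MAX_BITS
 * bounds how large that offset may be:
 *
 *	pte_t pte = pgoff_to_pte(pgoff);	// pte_file(pte) is true
 *	unsigned long back = pte_to_pgoff(pte);	// back == pgoff, provided
 *						// pgoff < (1UL << PTE_FILE_MAX_BITS)
 */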

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;	/* Dynamically allocated */
extern struct ctx_list ctx_free;	/* Head of free list */
extern struct ctx_list ctx_used;	/* Head of used contexts list */

#define NO_CONTEXT	-1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}

#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)
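
/*
 * Sketch of how these circular lists are meant to be used by the SRMMU
 * context allocator (illustrative only; assumes the free list is known to
 * be non-empty): take an entry off the free list, bind it to an mm, and
 * park it on the used list:
 *
 *	struct ctx_list *ctx = ctx_free.next;
 *
 *	remove_from_ctx_list(ctx);
 *	ctx->ctx_mm = mm;
 *	add_to_used_ctxlist(ctx);
 */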

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
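
/*
 * Driver-side sketch (illustrative; "space" and "paddr" are hypothetical
 * values describing the device's I/O space and physical offset): the caller
 * packs the iospace into the top pfn bits before handing the pfn over, and
 * typically makes the mapping uncached:
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT);
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				 vma->vm_end - vma->vm_start,
 *				 vma->vm_page_prot);
 */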

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty)  \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);  \
		flush_tlb_page(__vma, __address);			   \
	}								   \
	__changed;							   \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START	_AC(0xfe600000,UL)
#define VMALLOC_END	_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */