/* $Id: pgtable.h,v 1.156 2002/02/09 19:49:31 davem Exp $
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nopud.h>

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/const.h>
/* The kernel image occupies 0x400000 to 0x1000000 (4MB --> 16MB).
 * The page copy blockops use 0x1000000 to 0x1800000 (16MB --> 24MB).
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x140000000 to 0x200000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000001000000,UL)
#define MODULES_VADDR		_AC(0x0000000002000000,UL)
#define MODULES_LEN		_AC(0x000000007e000000,UL)
#define MODULES_END		_AC(0x0000000080000000,UL)
#define VMALLOC_START		_AC(0x0000000140000000,UL)
#define VMALLOC_END		_AC(0x0000000200000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
/* XXX All of this needs to be rethought so we can take advantage of
 * XXX cheetah's full 64-bit virtual address space, i.e. no more hole
 * XXX in the middle like on spitfire. -DaveM
 */
/*
 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
 * table is a single page long).  The next higher PMD_BITS determine pmd#
 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
 * since the pmd entries are 4 bytes, and each pmd page is a single page
 * long).  Finally, the higher few bits determine pgde#.
 */

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 2)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 2)

#ifndef __ASSEMBLY__

#include <linux/sched.h>

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)
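
/* Worked out for the default 8K pages (PAGE_SHIFT == 13), as a reading aid:
 * PMD_SHIFT is 23 (PMD_SIZE 8MB), PMD_BITS is 11, PGDIR_SHIFT is 34
 * (PGDIR_SIZE 16GB), giving PTRS_PER_PTE == 1024 and
 * PTRS_PER_PMD == PTRS_PER_PGD == 2048.  Bits [12:0] of a virtual address
 * are then the page offset, [22:13] index the pte page, [33:23] the pmd
 * page and [44:34] the pgd.
 */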
/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#endif /* !(__ASSEMBLY__) */

/* Spitfire/Cheetah TTE bits. */
#define _PAGE_VALID	_AC(0x8000000000000000,UL)	/* Valid TTE              */
#define _PAGE_R		_AC(0x8000000000000000,UL)	/* Keep ref bit up to date */
#define _PAGE_SZ4MB	_AC(0x6000000000000000,UL)	/* 4MB Page               */
#define _PAGE_SZ512K	_AC(0x4000000000000000,UL)	/* 512K Page              */
#define _PAGE_SZ64K	_AC(0x2000000000000000,UL)	/* 64K Page               */
#define _PAGE_SZ8K	_AC(0x0000000000000000,UL)	/* 8K Page                */
#define _PAGE_NFO	_AC(0x1000000000000000,UL)	/* No Fault Only          */
#define _PAGE_IE	_AC(0x0800000000000000,UL)	/* Invert Endianness      */
#define _PAGE_SOFT2	_AC(0x07FC000000000000,UL)	/* Software bits, set 2   */
#define _PAGE_RES1	_AC(0x0003000000000000,UL)	/* Reserved               */
#define _PAGE_SN	_AC(0x0000800000000000,UL)	/* (Cheetah) Snoop        */
#define _PAGE_RES2	_AC(0x0000780000000000,UL)	/* Reserved               */
#define _PAGE_PADDR_SF	_AC(0x000001FFFFFFE000,UL)	/* (Spitfire) paddr[40:13] */
#define _PAGE_PADDR	_AC(0x000007FFFFFFE000,UL)	/* (Cheetah) paddr[42:13] */
#define _PAGE_SOFT	_AC(0x0000000000001F80,UL)	/* Software bits          */
#define _PAGE_L		_AC(0x0000000000000040,UL)	/* Locked TTE             */
#define _PAGE_CP	_AC(0x0000000000000020,UL)	/* Cacheable in P-Cache   */
#define _PAGE_CV	_AC(0x0000000000000010,UL)	/* Cacheable in V-Cache   */
#define _PAGE_E		_AC(0x0000000000000008,UL)	/* side-Effect            */
#define _PAGE_P		_AC(0x0000000000000004,UL)	/* Privileged Page        */
#define _PAGE_W		_AC(0x0000000000000002,UL)	/* Writable               */
#define _PAGE_G		_AC(0x0000000000000001,UL)	/* Global                 */
/* Here are the SpitFire software bits we use in the TTE's.
 *
 * WARNING: If you are going to try and start using some
 *	    of the soft2 bits, you will need to make
 *	    modifications to the swap entry implementation.
 *	    For example, one thing that could happen is that
 *	    swp_entry_to_pte() would BUG_ON() if you tried
 *	    to use one of the soft2 bits for _PAGE_FILE.
 *
 * Like other architectures, I have aliased _PAGE_FILE with
 * _PAGE_MODIFIED.  This works because _PAGE_FILE is never
 * interpreted that way unless _PAGE_PRESENT is clear.
 */
#define _PAGE_EXEC	_AC(0x0000000000001000,UL)	/* Executable SW bit */
#define _PAGE_MODIFIED	_AC(0x0000000000000800,UL)	/* Modified (dirty)  */
#define _PAGE_FILE	_AC(0x0000000000000800,UL)	/* Pagecache page    */
#define _PAGE_ACCESSED	_AC(0x0000000000000400,UL)	/* Accessed (ref'd)  */
#define _PAGE_READ	_AC(0x0000000000000200,UL)	/* Readable SW Bit   */
#define _PAGE_WRITE	_AC(0x0000000000000100,UL)	/* Writable SW Bit   */
#define _PAGE_PRESENT	_AC(0x0000000000000080,UL)	/* Present           */

#if PAGE_SHIFT == 13
#define _PAGE_SZBITS	_PAGE_SZ8K
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS	_PAGE_SZ64K
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS	_PAGE_SZ512K
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS	_PAGE_SZ4MB
#else
#error Wrong PAGE_SHIFT specified
#endif
#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define _PAGE_SZHUGE	_PAGE_SZ4MB
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define _PAGE_SZHUGE	_PAGE_SZ512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	_PAGE_SZ64K
#endif

#define _PAGE_CACHE	(_PAGE_CP | _PAGE_CV)

#define __DIRTY_BITS	(_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
#define __PRIV_BITS	_PAGE_P

#define PAGE_NONE	__pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE)

/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
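/* (The first store through such a mapping faults, and the write-fault path
 *  dirties the pte via pte_mkdirty(), which sets _PAGE_MODIFIED together
 *  with the hardware _PAGE_W bit.)
 */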
#define PAGE_SHARED	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)

#define PAGE_COPY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_EXEC)

#define PAGE_READONLY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __PRIV_BITS | \
				  __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)

#define PAGE_SHARED_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | \
					  __ACCESS_BITS | _PAGE_WRITE)

#define PAGE_COPY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | __ACCESS_BITS)

#define PAGE_READONLY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | __ACCESS_BITS)

#define _PFN_MASK	_PAGE_PADDR

#define pg_iobits	(_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \
			 __ACCESS_BITS | _PAGE_E)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_NOEXEC
#define __P010	PAGE_COPY_NOEXEC
#define __P011	PAGE_COPY_NOEXEC
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_NOEXEC
#define __S010	PAGE_SHARED_NOEXEC
#define __S011	PAGE_SHARED_NOEXEC
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
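
/* The three digits in the __P/__S names are the exec/write/read bits of the
 * mapping's protection, with __P used for private and __S for shared
 * mappings; generic code indexes its protection_map[] this way.  E.g.
 * __P011 is a private read+write mapping and therefore gets the
 * copy-on-write PAGE_COPY_NOEXEC protection.
 */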
#ifndef __ASSEMBLY__

extern unsigned long phys_base;
extern unsigned long pfn_base;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.  This is common on a partitioned E10000, for example.
 */
#define pfn_pte(pfn, prot)	\
	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS)
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#define pte_pfn(x)		((pte_val(x) & _PAGE_PADDR) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define page_pte_prot(page, prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))
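
/* Illustrative sketch, not part of the original header: building a pte for
 * a pfn and reading the pfn back with the macros above.  The helper name is
 * made up; the round trip holds as long as the pfn fits inside _PAGE_PADDR.
 */
static inline int __example_pfn_pte_roundtrip(unsigned long pfn)
{
	/* pfn_pte() shifts the pfn up and ORs in the protection and size bits. */
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	/* pte_pfn() masks with _PAGE_PADDR before shifting back down. */
	return pte_pfn(pte) == pfn;
}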
static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
	pte_t __pte;
	const unsigned long preserve_mask = (_PFN_MASK |
					     _PAGE_MODIFIED | _PAGE_ACCESSED |
					     _PAGE_CACHE | _PAGE_E |
					     _PAGE_PRESENT | _PAGE_SZBITS);

	pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) |
		(pgprot_val(new_prot) & ~preserve_mask);

	return __pte;
}
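
/* Illustrative sketch, not part of the original header: protection changes
 * (mprotect() and friends) come down to pte_modify(), which keeps the pfn,
 * cache, dirty/accessed and size bits and swaps in the new protection bits.
 * The helper name is made up.
 */
static inline pte_t __example_pte_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);
}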
#define pmd_set(pmdp, ptep)	\
	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define __pmd_page(pmd)		\
	((unsigned long) __va((((unsigned long)pmd_val(pmd)) << 11UL)))
#define pmd_page(pmd)		virt_to_page((void *)__pmd_page(pmd))
#define pud_page(pud)		\
	((unsigned long) __va((((unsigned long)pud_val(pud)) << 11UL)))
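
/* The pmd and pud entries are only 32 bits wide, so they hold the physical
 * address of the next-level table shifted down by 11.  Page tables are page
 * aligned, so no low bits are lost, and 32 + 11 bits is enough to cover the
 * cheetah physical address range (paddr[42:13]).
 */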
#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0U)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0U)
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(0)
#define pud_present(pud)	(pud_val(pud) != 0U)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0U)

/* The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_exec(pte)		(pte_val(pte) & _PAGE_EXEC)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
#define pte_rdprotect(pte)	\
	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
#define pte_mkclean(pte)	\
	(__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
#define pte_mkold(pte)		\
	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
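
/* The (x << 1UL) >> 1UL dance above clears bit 63, i.e. the hardware
 * _PAGE_VALID/_PAGE_R bit, so the next access traps and the reference
 * state can be re-established via pte_mkyoung() below.
 */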
/* Permanent address of a page. */
#define __page_address(page)	page_address(page)

/* Be very careful when you change these three, they are delicate. */
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_WRITE))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_SZHUGE))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* extract the pgd cache used for optimizing the tlb miss
 * slow path when executing 32-bit compat processes
 */
#define get_pgd_cache(pgd)	((unsigned long) pgd_val(*pgd) << 11)

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel	pte_index
#define pte_offset_map		pte_index
#define pte_offset_map_nested	pte_index
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
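
/* Illustrative sketch, not part of the original header: the lookup macros
 * above compose into a software page table walk, in the same way
 * sun4u_get_pte() further down does it for kernel addresses.  The helper
 * name is made up and no page table locking is taken.
 */
static inline pte_t *__example_kernel_pte_lookup(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pud_t *pudp = pud_offset(pgdp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	return pte_offset_kernel(pmdp, addr);
}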
/* Actual page table PTE updates. */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig);

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig);
}

#define pte_clear(mm,addr,ptep)	\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

extern pgd_t swapper_pg_dir[1];

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)	(vaddr)
#define mmu_unlockarea(vaddr, len)	do { } while(0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Make a non-present pseudo-TTE. */
static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
	pte_t pte;

	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
			~(unsigned long)_PAGE_CACHE);
	pte_val(pte) |= (((unsigned long)space) << 32);

	return pte;
}

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		(((long)(type) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
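
/* With the encoding above, the resulting pseudo-pte looks like this for
 * PAGE_SHIFT == 13:
 *
 *	bits 63..21	swap offset
 *	bits 20..13	swap type (8 bits)
 *	bits 12..0	zero, so _PAGE_PRESENT and _PAGE_FILE stay clear
 */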
/* File offset in PTE support. */
#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
#define pgoff_to_pte(off)	(__pte(((off) << PAGE_SHIFT) | _PAGE_FILE))
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
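
/* The file offset is kept in the bits above PAGE_SHIFT; the topmost bit is
 * presumably left out of the encoding because it is the hardware
 * _PAGE_VALID/_PAGE_R bit, hence the "- 1UL" in PTE_FILE_MAX_BITS.
 */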
extern unsigned long prom_virt_to_phys(unsigned long, int *);

static __inline__ unsigned long
sun4u_get_pte (unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (addr >= PAGE_OFFSET)
		return addr & _PAGE_PADDR;
	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
		return prom_virt_to_phys(addr, NULL);

	pgdp = pgd_offset_k(addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset_kernel(pmdp, addr);

	return pte_val(*ptep) & _PAGE_PADDR;
}

static __inline__ unsigned long
__get_phys (unsigned long addr)
{
	return sun4u_get_pte (addr);
}

static __inline__ int
__get_iospace (unsigned long addr)
{
	return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
}

extern unsigned long *sparc64_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	\
	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
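
/* Each bit in sparc64_valid_addr_bitmap covers a 4MB (1 << 22) chunk of
 * physical memory, which is why the physical address is shifted down by 22
 * before indexing.
 */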
extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
			       unsigned long offset,
			       unsigned long size, pgprot_t prot, int space);
extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			      unsigned long pfn,
			      unsigned long size, pgprot_t prot);
/* Clear virtual and physical cacheability, set side-effect bit. */
#define pgprot_noncached(prot)	\
	(__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
		  _PAGE_E))
/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
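
/* Illustrative sketch, not part of the original header: round-tripping an
 * <iospace, pfn> pair through the helpers above.  With BITS_PER_LONG == 64
 * the space lands in bits 63..60 and the pfn in the low 60 bits.  The
 * helper name is made up.
 */
static inline int __example_iospace_roundtrip(unsigned long space,
					      unsigned long pfn)
{
	unsigned long enc = MK_IOSPACE_PFN(space, pfn);

	return (GET_IOSPACE(enc) == space && GET_PFN(enc) == pfn);
}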
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern void check_pgt_cache(void);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */