#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
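
/* With PAGE_SHIFT == 12 this gives 4 KB pages: PAGE_SIZE == 0x1000 and
 * PAGE_MASK == ~0xfffUL, so (addr & PAGE_MASK) rounds an address down
 * to the start of its page.
 */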

#ifdef __KERNEL__
#include <linux/config.h>
#ifndef __ASSEMBLY__

#include <asm/types.h>
#include <asm/cache.h>

#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))

struct page;

extern void purge_kernel_dcache_page(unsigned long);
extern void copy_user_page_asm(void *to, void *from);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
	copy_user_page_asm(vto, vfrom);
	flush_kernel_dcache_page(vto);
	/* XXX: ppc flushes icache too, should we? */
}

static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	purge_kernel_dcache_page((unsigned long)page);
	clear_user_page_asm(page, vaddr);
}
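
/* Both helpers keep the kernel data cache coherent around the asm
 * routines: copy_user_page() flushes the destination page from the
 * D-cache after copying, and clear_user_page() purges it beforehand.
 */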

/*
 * These are used to make use of C type-checking..
 */
#ifdef __LP64__
typedef struct { unsigned long pte; } pte_t;
#else
typedef struct {
	unsigned long pte;
	unsigned long flags;
} pte_t;
#endif

/* NOTE: even on 64 bits, these entries are __u32 because we allocate
 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pmd; } pmd_t;
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#ifdef __LP64__
#define pte_flags(x) (*(__u32 *)&((x).pte))
#else
#define pte_flags(x) ((x).flags)
#endif

/* These do not work as lvalues, so make sure we don't use them as such. */
#define pmd_val(x) ((x).pmd + 0)
#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)

#define __pmd_val_set(x,n) (x).pmd = (n)
#define __pgd_val_set(x,n) (x).pgd = (n)

#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
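
/* Illustrative use of the wrappers: pte_val(__pte(x)) == x and
 * pgprot_val(__pgprot(x)) == x, but the struct types stop a bare
 * unsigned long from being passed where a pte_t or pgprot_t is expected.
 */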

typedef struct __physmem_range {
	unsigned long start_pfn;
	unsigned long pages;	/* PAGE_SIZE pages */
} physmem_range_t;

extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;

#endif /* !__ASSEMBLY__ */

/* WARNING: The definitions below must match exactly to sizeof(pte_t)
 * etc.
 */
#ifdef __LP64__
#define BITS_PER_PTE_ENTRY 3
#define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY 2
#else
#define BITS_PER_PTE_ENTRY 3
#define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY
#endif
#define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY)
#define PTE_ENTRY_SIZE (1UL << BITS_PER_PTE_ENTRY)
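
/* The shifts above encode entry sizes in bytes: PTE_ENTRY_SIZE is 8
 * (1 << 3), matching sizeof(pte_t) on both 32- and 64-bit builds, while
 * PMD_ENTRY_SIZE and PGD_ENTRY_SIZE are 4 (1 << 2), matching the __u32
 * pmd and pgd members.
 */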

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
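
/* Example: PAGE_ALIGN(0x12345) == 0x13000; an already aligned address
 * such as 0x13000 is returned unchanged.
 */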

#define LINUX_GATEWAY_SPACE 0

/* This governs the relationship between virtual and physical addresses.
 * If you alter it, make sure to take care of our various fixed mapping
 * segments in fixmap.h */
#define __PAGE_OFFSET (0x10000000)

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)

/* The size of the gateway page (we leave lots of room for expansion) */
#define GATEWAY_PAGE_SIZE 0x4000

/* The start of the actual kernel binary---used in vmlinux.lds.S
 * Leave some space after __PAGE_OFFSET for detecting kernel null
 * ptr derefs */
#define KERNEL_BINARY_TEXT_START (__PAGE_OFFSET + 0x100000)

/* These macros don't work for 64-bit C code -- don't allow in C at all */
#ifdef __ASSEMBLY__
# define PA(x) ((x)-__PAGE_OFFSET)
# define VA(x) ((x)+__PAGE_OFFSET)
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
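
/* For directly mapped kernel addresses these are plain offsets by
 * PAGE_OFFSET (0x10000000): e.g. __pa(0x10400000) == 0x00400000 and
 * __va(0x00400000) == (void *)0x10400000.
 */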

#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
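
/* In the flat (non-discontiguous) memory model the struct page array
 * mem_map is indexed directly by pfn, so page_to_pfn(pfn_to_page(n)) == n
 * for any valid pfn.
 */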

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif
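
/* With HPAGE_SHIFT == 22 a huge page is 4 MB (0x400000 bytes) and
 * HUGETLB_PAGE_ORDER is 10, i.e. 1024 base pages per huge page.
 */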

#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
				VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#include <asm-generic/page.h>

#endif /* _PARISC_PAGE_H */