
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables would have to be allocated in one
 * physical chunk of RAM.
 */

/*
 * We use one full pte table with 4K pages. With 16K/64K/256K pages a
 * pte table covers enough memory (32MB/512MB/2GB respectively) that
 * both FIXMAP and PKMAP can be placed in a single pte table; we use
 * 512 pages for PKMAP in the 16K/64K/256K page-size cases.
 */
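
/*
 * Worked example of the figures above (assuming 8-byte PTEs on these
 * configs): with 16K pages one pte page holds 16K/8 = 2048 entries,
 * mapping 2048 * 16K = 32MB; the 512 PKMAP slots then consume only a
 * quarter of the table, leaving room for FIXMAP in the same table.
 */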
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
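
/*
 * PKMAP_NR() and PKMAP_ADDR() are inverses over the pkmap window:
 * PKMAP_ADDR(0) == PKMAP_BASE, and PKMAP_NR(PKMAP_ADDR(n)) == n for
 * 0 <= n < LAST_PKMAP.
 */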
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
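
/*
 * Usage sketch (hypothetical caller; process context only, since
 * kmap() may sleep):
 *
 *	char *vto = kmap(page);
 *	memcpy(vto, src, len);
 *	kunmap(page);
 */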
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, enum km_type type,
				     pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}

static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
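
/*
 * Usage sketch (hypothetical caller; safe in IRQ context, but no
 * sleeping between map and unmap):
 *
 *	char *vto = kmap_atomic(page, KM_USER0);
 *	memcpy(vto, src, len);
 *	kunmap_atomic(vto, KM_USER0);
 */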
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}
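
/*
 * Translate a kmap_atomic() address back to the struct page it
 * currently maps; lowmem addresses simply fall through to
 * virt_to_page().
 */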
static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */