/* highmem.h: virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * See Documentation/fujitsu/frv/mmu-layout.txt for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/init.h>
#include <asm/mem-layout.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>

/* number of lines in the TLB */
#define NR_TLB_LINES 64

#ifndef __ASSEMBLY__

#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* HIGHMEM_DEBUG is always defined so users can test it with plain #if */
#ifdef CONFIG_DEBUG_HIGHMEM
#define HIGHMEM_DEBUG 1
#else
#define HIGHMEM_DEBUG 0
#endif
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

/* page protection used for kmap'd pages */
#define kmap_prot PAGE_KERNEL
/* NOTE(review): the placeholder name suggests the kmap pte is held in the
 * TLB rather than in a page table on this arch — confirm against highmem.c */
#define kmap_pte ______kmap_pte_in_TLB
extern pte_t *pkmap_page_table;

/* no cache flushing required around kmaps on this arch */
#define flush_cache_kmaps() do { } while (0)

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
/* convert a pkmap virtual address to its slot number, and back */
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))

/* sleeping kmap interface (implemented in mm/highmem.c / arch highmem.c) */
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);

extern struct page *kmap_atomic_to_page(void *ptr);

#endif /* !__ASSEMBLY__ */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
#define KMAP_ATOMIC_CACHE_DAMR 8

#ifndef __ASSEMBLY__

/*
 * Map a page for an atomic kmap by loading the DAMPR register numbered
 * "ampr" directly (for the __KM_CACHE type, the matching IAMPR is loaded
 * too so instruction fetches see the mapping as well).  "ampr" must be a
 * literal, as it is token-pasted into the asm register names.
 * Evaluates to the virtual address the AMPR register maps, read back
 * from the corresponding DAMLR register.
 */
#define __kmap_atomic_primary(type, paddr, ampr) \
({ \
	unsigned long damlr, dampr; \
	\
	/* physical address plus L/M/S/SS_16Kb/V attribute bits \
	 * (register definitions in asm/spr-regs.h) */ \
	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
	\
	if (type != __KM_CACHE) \
		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr)); \
	else \
		asm volatile("movgs %0,iampr"#ampr"\n" \
			     "movgs %0,dampr"#ampr"\n" \
			     :: "r"(dampr) \
			     ); \
	\
	/* fetch the virtual address this AMPR pair maps */ \
	asm("movsg damlr"#ampr",%0" : "=r"(damlr)); \
	\
	/*printk("DAMR"#ampr": PRIM sl=%d L=%08lx P=%08lx\n", type, damlr, dampr);*/ \
	\
	(void *) damlr; \
})
/*
 * Map a page for an atomic kmap through a dedicated TLB line.  Each
 * secondary slot owns one page of virtual space in the
 * KMAP_ATOMIC_SECONDARY_FRAME region; the tlbpr instruction inserts a
 * translation for it from the tplr/tppr register pair (operands #2,#1
 * presumably select the insert operation — see the FR-V manual).
 * Evaluates to the slot's virtual address.
 */
#define __kmap_atomic_secondary(slot, paddr) \
({ \
	unsigned long damlr = KMAP_ATOMIC_SECONDARY_FRAME + (slot) * PAGE_SIZE; \
	unsigned long dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
	\
	asm volatile("movgs %0,tplr \n" \
		     "movgs %1,tppr \n" \
		     "tlbpr %0,gr0,#2,#1" \
		     : : "r"(damlr), "r"(dampr)); \
	\
	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/ \
	\
	(void *) damlr; \
})
  95. static inline void *kmap_atomic(struct page *page, enum km_type type)
  96. {
  97. unsigned long paddr;
  98. preempt_disable();
  99. paddr = page_to_phys(page);
  100. switch (type) {
  101. case 0: return __kmap_atomic_primary(0, paddr, 2);
  102. case 1: return __kmap_atomic_primary(1, paddr, 3);
  103. case 2: return __kmap_atomic_primary(2, paddr, 4);
  104. case 3: return __kmap_atomic_primary(3, paddr, 5);
  105. case 4: return __kmap_atomic_primary(4, paddr, 6);
  106. case 5: return __kmap_atomic_primary(5, paddr, 7);
  107. case 6: return __kmap_atomic_primary(6, paddr, 8);
  108. case 7: return __kmap_atomic_primary(7, paddr, 9);
  109. case 8: return __kmap_atomic_primary(8, paddr, 10);
  110. case 9 ... 9 + NR_TLB_LINES - 1:
  111. return __kmap_atomic_secondary(type - 9, paddr);
  112. default:
  113. BUG();
  114. return 0;
  115. }
  116. }
/*
 * Tear down a primary atomic kmap: clear the DAMPR register numbered
 * "ampr" (gr0 always reads as zero), and the matching IAMPR too for the
 * __KM_CACHE type, which mapped both.  "ampr" must be a literal (it is
 * token-pasted into the register name).
 */
#define __kunmap_atomic_primary(type, ampr) \
do { \
	asm volatile("movgs gr0,dampr"#ampr"\n"); \
	if (type == __KM_CACHE) \
		asm volatile("movgs gr0,iampr"#ampr"\n"); \
} while(0)

/*
 * Tear down a secondary atomic kmap: tlbpr on vaddr (operands #4,#1
 * presumably select the invalidate operation — see the FR-V manual).
 * The slot argument is currently unused.
 */
#define __kunmap_atomic_secondary(slot, vaddr) \
do { \
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr)); \
} while(0)
/*
 * kunmap_atomic - undo a kmap_atomic mapping and re-enable preemption
 * @kvaddr: the virtual address returned by the matching kmap_atomic()
 * @type:   the same kmap slot that was passed to kmap_atomic()
 *
 * The type->register-number mapping (0..8 -> 2..10) must mirror
 * kmap_atomic() exactly; the switch cannot be collapsed because the
 * register number is token-pasted into the asm.
 */
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
	switch (type) {
	case 0:		__kunmap_atomic_primary(0, 2);	break;
	case 1:		__kunmap_atomic_primary(1, 3);	break;
	case 2:		__kunmap_atomic_primary(2, 4);	break;
	case 3:		__kunmap_atomic_primary(3, 5);	break;
	case 4:		__kunmap_atomic_primary(4, 6);	break;
	case 5:		__kunmap_atomic_primary(5, 7);	break;
	case 6:		__kunmap_atomic_primary(6, 8);	break;
	case 7:		__kunmap_atomic_primary(7, 9);	break;
	case 8:		__kunmap_atomic_primary(8, 10);	break;

	case 9 ... 9 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 9, kvaddr);
		break;

	default:
		BUG();
	}
	/* balances the preempt_disable() in kmap_atomic() */
	preempt_enable();
}

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */