/* highmem.h: virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * See Documentation/frv/mmu-layout.txt for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <asm/mem-layout.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>

#define NR_TLB_LINES 64 /* number of lines in the TLB */

#ifndef __ASSEMBLY__

#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* Mirror CONFIG_DEBUG_HIGHMEM as a 0/1 constant usable in plain C
 * conditionals (if (HIGHMEM_DEBUG) ...) rather than only in #ifdefs. */
#ifdef CONFIG_DEBUG_HIGHMEM
#define HIGHMEM_DEBUG 1
#else
#define HIGHMEM_DEBUG 0
#endif
  31. /* declarations for highmem.c */
  32. extern unsigned long highstart_pfn, highend_pfn;
  33. #define kmap_prot PAGE_KERNEL
  34. #define kmap_pte ______kmap_pte_in_TLB
  35. extern pte_t *pkmap_page_table;
  36. #define flush_cache_kmaps() do { } while (0)
  37. /*
  38. * Right now we initialize only a single pte table. It can be extended
  39. * easily, subsequent pte tables have to be allocated in one physical
  40. * chunk of RAM.
  41. */
  42. #define LAST_PKMAP PTRS_PER_PTE
  43. #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
  44. #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
  45. #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
  46. extern void *kmap_high(struct page *page);
  47. extern void kunmap_high(struct page *page);
  48. extern void *kmap(struct page *page);
  49. extern void kunmap(struct page *page);
  50. extern struct page *kmap_atomic_to_page(void *ptr);
  51. #endif /* !__ASSEMBLY__ */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
/* AMPR register number reserved for the cache-manipulation atomic kmap
 * (the __KM_CACHE path below loads IAMPR as well as DAMPR). */
#define KMAP_ATOMIC_CACHE_DAMR 8

#ifndef __ASSEMBLY__
/*
 * Establish a primary atomic kmap of physical address @paddr by loading
 * DAMPR register number @ampr directly (ampr must be a literal token;
 * it is pasted into the asm register name).  For the __KM_CACHE type the
 * matching IAMPR is loaded too — presumably so the mapping also covers
 * instruction-side accesses; see asm/spr-regs.h.  Evaluates to the
 * virtual address of the mapping, read back from DAMLR<ampr>.
 */
#define __kmap_atomic_primary(type, paddr, ampr) \
({ \
	unsigned long damlr, dampr; \
 \
	/* build the PR value: 16Kb page, supervisor, valid */ \
	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
 \
	if (type != __KM_CACHE) \
		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
	else \
		asm volatile("movgs %0,iampr"#ampr"\n" \
			     "movgs %0,dampr"#ampr"\n" \
			     :: "r"(dampr) : "memory" \
			     ); \
 \
	/* the virtual address the hardware assigned to this AMPR pair */ \
	asm("movsg damlr"#ampr",%0" : "=r"(damlr)); \
 \
	/*printk("DAMR"#ampr": PRIM sl=%d L=%08lx P=%08lx\n", type, damlr, dampr);*/ \
 \
	(void *) damlr; \
})
/*
 * Establish a secondary atomic kmap of physical address @paddr by
 * inserting a TLB entry (tlbpr insn) rather than burning an AMPR pair.
 * The virtual address is fixed per @slot within the secondary frame.
 * Evaluates to that virtual address.
 */
#define __kmap_atomic_secondary(slot, paddr) \
({ \
	/* one page-sized window per slot in the secondary kmap frame */ \
	unsigned long damlr = KMAP_ATOMIC_SECONDARY_FRAME + (slot) * PAGE_SIZE; \
	unsigned long dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
 \
	/* load TLB probe registers and write the entry */ \
	asm volatile("movgs %0,tplr \n" \
		     "movgs %1,tppr \n" \
		     "tlbpr %0,gr0,#2,#1" \
		     : : "r"(damlr), "r"(dampr) : "memory"); \
 \
	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/ \
 \
	(void *) damlr; \
})
/*
 * kmap_atomic - atomically map a page for temporary kernel access
 * @page: the page to map
 * @type: km_type slot; 0..8 select a primary AMPR mapping (register
 *        number = type + 2, a literal required by the macro's token
 *        pasting), 9..9+NR_TLB_LINES-1 select a secondary TLB-entry
 *        mapping.
 *
 * Disables pagefaults for the duration of the mapping (re-enabled by
 * kunmap_atomic).  Returns the virtual address of the mapping; BUGs on
 * an out-of-range type.
 */
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long paddr;
	pagefault_disable();
	paddr = page_to_phys(page);
	switch (type) {
	case 0: return __kmap_atomic_primary(0, paddr, 2);
	case 1: return __kmap_atomic_primary(1, paddr, 3);
	case 2: return __kmap_atomic_primary(2, paddr, 4);
	case 3: return __kmap_atomic_primary(3, paddr, 5);
	case 4: return __kmap_atomic_primary(4, paddr, 6);
	case 5: return __kmap_atomic_primary(5, paddr, 7);
	case 6: return __kmap_atomic_primary(6, paddr, 8);
	case 7: return __kmap_atomic_primary(7, paddr, 9);
	case 8: return __kmap_atomic_primary(8, paddr, 10);
	/* remaining types spill into per-slot TLB entries */
	case 9 ... 9 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 9, paddr);
	default:
		BUG();
		return NULL;
	}
}
/*
 * Tear down a primary atomic kmap: clearing DAMPR<ampr> (write of gr0)
 * invalidates the mapping; the __KM_CACHE type also clears the IAMPR
 * that __kmap_atomic_primary loaded.
 */
#define __kunmap_atomic_primary(type, ampr) \
do { \
	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
	if (type == __KM_CACHE) \
		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
} while(0)

/*
 * Tear down a secondary atomic kmap by probing the TLB entry out by its
 * virtual address.  @slot is unused here — the tlbpr invalidate is keyed
 * on @vaddr alone.
 */
#define __kunmap_atomic_secondary(slot, vaddr) \
do { \
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0)
/*
 * kunmap_atomic - undo a mapping established by kmap_atomic()
 * @kvaddr: virtual address returned by kmap_atomic(); only consulted for
 *          secondary (TLB) slots, where invalidation is by address
 * @type: the same km_type slot passed to kmap_atomic() — the case list
 *        must mirror kmap_atomic's slot-to-register assignment exactly
 *
 * Re-enables pagefaults after the mapping is gone; BUGs on an
 * out-of-range type.
 */
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
	switch (type) {
	case 0: __kunmap_atomic_primary(0, 2); break;
	case 1: __kunmap_atomic_primary(1, 3); break;
	case 2: __kunmap_atomic_primary(2, 4); break;
	case 3: __kunmap_atomic_primary(3, 5); break;
	case 4: __kunmap_atomic_primary(4, 6); break;
	case 5: __kunmap_atomic_primary(5, 7); break;
	case 6: __kunmap_atomic_primary(6, 8); break;
	case 7: __kunmap_atomic_primary(7, 9); break;
	case 8: __kunmap_atomic_primary(8, 10); break;
	case 9 ... 9 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 9, kvaddr);
		break;
	default:
		BUG();
	}
	pagefault_enable();
}

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */