/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:    Nicolas Pitre
 * Created:   September 8, 2008
 * Copyright: Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>

#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

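/*
 * kmap() - map a highmem page into the kernel's pkmap area.  The mapping
 * persists until kunmap() and may sleep, so this must not be called from
 * atomic context.  Lowmem pages already have a permanent linear mapping,
 * so their address is returned directly.
 */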
void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

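/*
 * kunmap() - release a mapping obtained with kmap().  Highmem pages drop
 * their pkmap reference; lowmem pages need no work.  Must not be called
 * from interrupt context.
 */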
void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

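/*
 * kmap_atomic() - map a highmem page into one of this CPU's fixmap slots.
 * The mapping never sleeps and is therefore usable from atomic context,
 * but it is strictly per-CPU and must be released with kunmap_atomic().
 * If the page already has a permanent kmap mapping, that mapping is
 * normally reused via kmap_high_get(); on VIVT caches this avoids having
 * two live virtual aliases of the same page.
 */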
void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue with a non-VIVT cache, so
         * force dedicated kmap usage in that case for better debugging.
         */
        if (!cache_is_vivt())
                kmap = NULL;
        else
#endif
        kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

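/*
 * kunmap_atomic() - tear down a mapping set up by kmap_atomic(), or drop
 * the permanent kmap reference if kmap_atomic() reused one via
 * kmap_high_get().  With CONFIG_DEBUG_HIGHMEM the fixmap entry is cleared
 * immediately; otherwise it is left in place to be overwritten by the
 * next user of the slot.
 */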
void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

        if (kvaddr >= (void *)FIXADDR_START) {
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

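/*
 * Illustrative use of the atomic kmap API above (hypothetical helper, not
 * part of this file):
 *
 *        void copy_from_high_page(struct page *page, void *dst, size_t len)
 *        {
 *                void *src = kmap_atomic(page, KM_USER0);
 *                memcpy(dst, src, len);
 *                kunmap_atomic(src, KM_USER0);
 *        }
 */

/*
 * kmap_atomic_pfn() - like kmap_atomic(), but takes a page frame number
 * rather than a struct page, and always installs a fresh fixmap entry
 * (there is no permanent kmap to reuse).
 */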
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}

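/*
 * kmap_atomic_to_page() - recover the struct page behind an address
 * returned by kmap_atomic().  Addresses at or above FIXADDR_START are
 * resolved through their fixmap PTE; anything below is assumed to be a
 * lowmem linear address and handled with virt_to_page().
 */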
struct page *kmap_atomic_to_page(const void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However, unmapped pages may still be cached with a VIPT cache, and
 * unfortunately it is not possible to perform cache maintenance on them
 * using physical addresses. So we have no choice but to set up a
 * temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which may be called from interrupt context. Since we don't
 * want to keep interrupts disabled for the whole duration of such
 * maintenance, we allow for some reentrancy by preserving and restoring
 * the previous fixmap entry before the interrupted context is resumed.
 * If the reentrancy depth is 0 then there is no need to restore the
 * previous fixmap, and leaving the current one in place allows it to be
 * reused the next time without a TLB flush (common with DMA).
 */

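/*
 * Illustrative sketch of how the pair below is meant to be used by cache
 * maintenance code (hypothetical caller, not taken from this file):
 *
 *        pte_t saved_pte;
 *        void *addr = kmap_high_l1_vipt(page, &saved_pte);
 *        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
 *        kunmap_high_l1_vipt(page, saved_pte);
 */
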
static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

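/*
 * kmap_high_l1_vipt() - map @page into this CPU's KM_L1_CACHE fixmap slot
 * for L1 cache maintenance.  The previous contents of the slot are handed
 * back through @saved_pte so that a nested (interrupting) user can restore
 * them in kunmap_high_l1_vipt().
 */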
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        if (!in_interrupt())
                preempt_disable();

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}

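/*
 * kunmap_high_l1_vipt() - undo kmap_high_l1_vipt().  The fixmap entry is
 * restored from @saved_pte only while another user is still nested below
 * us; at depth zero the mapping is deliberately left in place so it can
 * be reused next time without a TLB flush.
 */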
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}

#endif /* CONFIG_CPU_CACHE_VIPT */