/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

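/*
 * A minimal usage sketch: a typical caller pairs kmap() with kunmap()
 * from sleepable context to access a page that may live in highmem.
 * "example_zero_highmem_page" is a hypothetical helper, not part of
 * this file's API.
 */
#if 0
static void example_zero_highmem_page(struct page *page)
{
	/* may sleep; for lowmem pages this is just page_address() */
	void *vaddr = kmap(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);	/* a no-op for lowmem pages */
}
#endif
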
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when the cache is not VIVT,
	 * so force the dedicated kmap usage for better debugging purposes
	 * in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

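/*
 * A minimal usage sketch for the atomic path: callers normally go through
 * the kmap_atomic()/kunmap_atomic() wrappers rather than the __-prefixed
 * functions, and must not sleep between map and unmap.
 * "example_clear_page_atomic" is a hypothetical helper.
 */
#if 0
static void example_clear_page_atomic(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* disables pagefaults */

	clear_page(kaddr);
	kunmap_atomic(kaddr);	/* mappings must be released in LIFO order */
}
#endif
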
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

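/*
 * A usage sketch for kmap_atomic_pfn(): it maps memory identified only by
 * page frame number, useful when no struct page is at hand; the mapping
 * is released through kunmap_atomic() like any other fixmap-based one.
 * "example_read_word_at_pfn" is a hypothetical helper.
 */
#if 0
static u32 example_read_word_at_pfn(unsigned long pfn)
{
	u32 *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *vaddr;

	kunmap_atomic(vaddr);	/* takes the fixmap path in __kunmap_atomic */
	return val;
}
#endif
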
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However, unmapped pages may still be cached with a VIPT cache, and
 * unfortunately it is not possible to perform cache maintenance on them
 * using physical addresses. So we have no choice but to set up a
 * temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we don't
 * want to keep interrupts disabled the whole time such maintenance is
 * taking place, we allow for some reentrancy by preserving and restoring
 * the previous fixmap entry before the interrupted context is resumed.
 * If the reentrancy depth is 0 then there is no need to restore the
 * previous fixmap, and leaving the current one in place allows it to be
 * reused the next time without a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu;
	int *depth;
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	if (!in_interrupt())
		preempt_disable();

	cpu = smp_processor_id();
	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}

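/*
 * A usage sketch for the save/restore pair above: a cache maintenance
 * routine, possibly running in interrupt context, maps the page, operates
 * on the temporary virtual address, then restores whatever mapping the
 * interrupted context had. "example_flush_highmem_page" is a hypothetical
 * caller.
 */
#if 0
static void example_flush_highmem_page(struct page *page)
{
	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);

	/* puts back the previous fixmap entry if we interrupted someone */
	kunmap_high_l1_vipt(page, saved_pte);
}
#endif
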
#endif	/* CONFIG_CPU_CACHE_VIPT */