/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
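
/*
 * kmap() creates a long-lived kernel mapping for a page that may live in
 * highmem.  It can sleep, so it must not be used in atomic context; lowmem
 * pages already have a permanent mapping and are returned directly.
 *
 * Illustrative usage (a sketch, not part of the original file):
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, PAGE_SIZE);
 *	kunmap(page);
 */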
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
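
/*
 * kunmap() releases a mapping set up by kmap().  It must not be called
 * from interrupt context; lowmem pages need no teardown.
 */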
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
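
/*
 * __kmap_atomic() establishes a short-lived, per-CPU fixmap mapping that
 * is safe in atomic context (page faults are disabled for its duration).
 * If the page already has a permanent kmap mapping, that one is reused;
 * on VIVT caches this also avoids creating a second virtual alias.
 */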
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non-VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
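
/*
 * __kunmap_atomic() tears down a mapping from __kmap_atomic().  Fixmap
 * addresses get their slot popped and, on VIVT, their D-cache lines
 * flushed before the slot is reused; with CONFIG_DEBUG_HIGHMEM the PTE
 * is also cleared so stale uses fault.  PKMAP-range addresses came from
 * kmap_high_get() and are dropped with kunmap_high().
 */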
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
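
/*
 * kmap_atomic_pfn() is the __kmap_atomic() variant that takes a raw page
 * frame number, so it also works for memory without a struct page.
 */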
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
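
/*
 * kmap_atomic_to_page() converts a kernel virtual address back to its
 * struct page, walking the fixmap PTE for atomic kmap addresses.
 */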
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped.  Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However, unmapped pages may still be cached with a VIPT cache, and
 * it is unfortunately not possible to perform cache maintenance on them
 * using physical addresses.  So we have no choice but to set up a
 * temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context.  As we
 * don't want to keep interrupts disabled all the time when such
 * maintenance is taking place, we allow for some reentrancy by
 * preserving and restoring the previous fixmap entry before the
 * interrupted context is resumed.  If the reentrancy depth is 0 then
 * there is no need to restore the previous fixmap, and leaving the
 * current one in place allows it to be reused the next time without a
 * TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
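
/*
 * Illustrative usage of the pair below (a sketch, not part of the
 * original file): a DMA cache maintenance routine would typically wrap
 * the cache operation like this, where "op" and "dir" stand for whatever
 * operation and DMA direction the caller is handling:
 *
 *	pte_t saved_pte;
 *	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);
 *	op(vaddr, PAGE_SIZE, dir);
 *	kunmap_high_l1_vipt(page, saved_pte);
 */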
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu;
	int *depth;
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	if (!in_interrupt())
		preempt_disable();

	cpu = smp_processor_id();
	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}
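
/*
 * kunmap_high_l1_vipt() undoes kmap_high_l1_vipt().  On a reentrant use
 * (depth still non-zero) with a different page in the interrupted
 * context, the saved PTE is reinstated; otherwise the current mapping is
 * deliberately left in place so the next map of the same page can skip
 * the TLB flush.
 */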
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);
	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}

#endif	/* CONFIG_CPU_CACHE_VIPT */