/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
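
/*
 * Illustrative usage sketch (not part of the original file): a sleepable
 * context maps a possibly-highmem page with kmap(), works through the
 * returned kernel virtual address, then drops the mapping with kunmap().
 * The helper below is hypothetical, assumes memset() is available (e.g.
 * via <linux/string.h>), and is guarded out so it does not affect the build.
 */
#if 0
static void example_zero_highmem_page(struct page *page)
{
        void *addr = kmap(page);        /* may sleep; mapping persists across sleeps */

        memset(addr, 0, PAGE_SIZE);
        kunmap(page);                   /* never from interrupt context */
}
#endif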

void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);

        kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

        if (kvaddr >= (void *)FIXADDR_START) {
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
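
/*
 * Illustrative usage sketch (not part of the original file): with this API
 * revision kmap_atomic() still takes an explicit km_type slot, so a
 * non-sleeping copy between two highmem pages could look like the
 * hypothetical helper below. It assumes the generic KM_USER0/KM_USER1 slots
 * from <asm/kmap_types.h> and memcpy(); guarded out of the build.
 */
#if 0
static void example_copy_highmem_page(struct page *dst, struct page *src)
{
        void *vdst, *vsrc;

        vsrc = kmap_atomic(src, KM_USER0);      /* disables pagefaults */
        vdst = kmap_atomic(dst, KM_USER1);      /* distinct slot on the same CPU */
        memcpy(vdst, vsrc, PAGE_SIZE);
        kunmap_atomic(vdst, KM_USER1);
        kunmap_atomic(vsrc, KM_USER0);          /* re-enables pagefaults */
}
#endif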

void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        unsigned int idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
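
/*
 * Illustrative usage sketch (not part of the original file):
 * kmap_atomic_pfn() maps a raw page frame number, which helps when the
 * caller has no struct page for the frame. The helper below is
 * hypothetical and guarded out of the build.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn, unsigned int offset)
{
        void *addr = kmap_atomic_pfn(pfn, KM_USER0);
        u32 val = *(u32 *)(addr + offset);

        kunmap_atomic(addr, KM_USER0);
        return val;
}
#endif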

struct page *kmap_atomic_to_page(const void *ptr)
{
        unsigned long vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However unmapped pages may still be cached with a VIPT cache, and
 * it is not possible to perform cache maintenance on them using physical
 * addresses unfortunately. So we have no choice but to set up a temporary
 * virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we don't
 * want to keep interrupts disabled all the time when such maintenance is
 * taking place, we therefore allow for some reentrancy by preserving and
 * restoring the previous fixmap entry before the interrupted context is
 * resumed. If the reentrancy depth is 0 then there is no need to restore
 * the previous fixmap, and leaving the current one in place allows it to
 * be reused the next time without a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        if (!in_interrupt())
                preempt_disable();

        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
                *saved_pte = pte;
        } else {
                *saved_pte = *ptep;
                set_pte_ext(ptep, pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
        unsigned int idx, cpu = smp_processor_id();
        int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
        unsigned long vaddr, flags;
        pte_t pte, *ptep;

        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);

        BUG_ON(pte_val(*ptep) != pte_val(pte));
        BUG_ON(*depth <= 0);

        raw_local_irq_save(flags);
        (*depth)--;
        if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
                set_pte_ext(ptep, saved_pte, 0);
                local_flush_tlb_kernel_page(vaddr);
        }
        raw_local_irq_restore(flags);

        if (!in_interrupt())
                preempt_enable();
}
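
/*
 * Illustrative usage sketch (not part of the original file): callers such
 * as the DMA cache maintenance code are expected to bracket the flush with
 * kmap_high_l1_vipt()/kunmap_high_l1_vipt(), keeping the previous fixmap
 * entry in saved_pte so an interrupted user of the same slot is restored.
 * Hypothetical, guarded-out example:
 */
#if 0
static void example_flush_highmem_dcache(struct page *page)
{
        pte_t saved_pte;
        void *addr = kmap_high_l1_vipt(page, &saved_pte);

        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
        kunmap_high_l1_vipt(page, saved_pte);
}
#endif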

#endif	/* CONFIG_CPU_CACHE_VIPT */