highmem_32.c

#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	/* Lowmem pages are permanently mapped; just return their address. */
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	/* kmap()/kunmap() may sleep, so they are invalid in interrupt context. */
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
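
/*
 * Usage sketch (illustrative, not part of this file): kmap() may sleep,
 * so the pattern below is only valid in process context.  The helper
 * name is hypothetical; assumes <linux/string.h> for memset().
 */
#if 0
static void example_zero_page(struct page *page)
{
	char *vaddr = kmap(page);	/* may block on the kmap pool */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* release the pool entry */
}
#endif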
/*
 * Sanity-check the km_type passed to kmap_atomic_prot(): warn (at most
 * warn_count times) when a slot is used from a context it is not meant
 * for, or when interrupts are not disabled for an interrupt-only slot.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}
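
/*
 * Illustrative only: from hard-IRQ context one of the KM_IRQ* slots must
 * be used, or debug_kmap_atomic_prot() will warn.  The handler and its
 * dev_id usage below are hypothetical; assumes <linux/interrupt.h>.
 */
#if 0
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct page *page = dev_id;	/* hypothetical: a page as dev_id */
	void *vaddr = kmap_atomic(page, KM_IRQ0);	/* IRQ-context slot */

	/* ... inspect the freshly filled page here ... */
	kunmap_atomic(vaddr, KM_IRQ0);
	return IRQ_HANDLED;
}
#endif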
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed, and because the non-atomic kmap code
 * must perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
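
/*
 * Usage sketch (illustrative, not part of this file): the same km_type
 * slot must be passed to kunmap_atomic(), and no sleeping is allowed in
 * between.  KM_USER0 is one of the generic slots from asm/kmap_types.h;
 * assumes <linux/string.h> for memset().
 */
#if 0
static void example_clear_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);	/* same slot, same CPU */
}
#endif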
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
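
/*
 * Usage sketch (illustrative only): because kmap_atomic_pfn() takes a raw
 * page frame number, it can map frames that have no struct page, e.g.
 * inside a device aperture.  The pfn and helper below are hypothetical.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn, unsigned int offset)
{
	u32 *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = vaddr[offset / sizeof(u32)];	/* offset must stay in-page */

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
#endif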

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
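
/*
 * Usage sketch (illustrative only): kmap_atomic_to_page() recovers the
 * struct page behind an atomic fixmap address, and falls back to
 * virt_to_page() for ordinary lowmem pointers.
 */
#if 0
static struct page *example_round_trip(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);
	struct page *check = kmap_atomic_to_page(vaddr);

	kunmap_atomic(vaddr, KM_USER0);
	return check;			/* == page */
}
#endif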

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);