highmem.c

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;
/*
 * Map a (possibly highmem) page into the kernel's persistent kmap area.
 * May sleep, so it must not be called from atomic context.
 */
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);
/*
 * Release a mapping obtained with __kmap().  Must not be called from
 * interrupt context.
 */
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
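
/*
 * Minimal usage sketch (not part of the original file): copying a highmem
 * page into a kernel buffer through an atomic kmap.  It assumes the generic
 * kmap_atomic()/kunmap_atomic() wrappers and the KM_USER0 slot from
 * <linux/highmem.h>, and that memcpy() is available via <linux/string.h>;
 * the function name and destination buffer are hypothetical.  The mapping
 * must be dropped before doing anything that could sleep.
 */
static void __maybe_unused example_copy_from_page(struct page *page, void *dst)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, PAGE_SIZE);	/* no sleeping while the kmap is held */
	kunmap_atomic(src, KM_USER0);
}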
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
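
/*
 * Illustrative sketch (not part of the original file): reading one word
 * from an arbitrary physical frame via kmap_atomic_pfn().  The function
 * name is hypothetical and the pfn comes from the caller; the pairing with
 * kunmap_atomic() and the no-sleep rule are the same as for kmap_atomic().
 */
static unsigned long __maybe_unused example_peek_pfn(unsigned long pfn)
{
	unsigned long *p = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned long val = *p;

	kunmap_atomic(p, KM_USER0);
	return val;
}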
/*
 * Translate an atomic kmap address back to its struct page; lowmem
 * addresses are handled directly via virt_to_page().
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}