highmem.c

#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
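
/*
 * Usage sketch (illustrative, not part of the original file): reading the
 * first byte of a possibly-highmem page with the sleeping kmap()/kunmap()
 * pair.  Process context only, since kmap() may block when the kmap pool
 * is exhausted.
 */
static unsigned char example_read_first_byte(struct page *page)
{
	unsigned char *vaddr = kmap(page);	/* may sleep */
	unsigned char c = *vaddr;

	kunmap(page);				/* unmap by page, not by address */
	return c;
}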

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they'll try to access
	 * this pte without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
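
/*
 * Usage sketch (illustrative, not part of the original file): zeroing a
 * possibly-highmem page from atomic context with the old two-argument
 * kmap_atomic() API.  Nothing between the map and the unmap may sleep;
 * memset() comes from <linux/string.h>.
 */
static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* never sleeps */

	memset(vaddr, 0, PAGE_SIZE);	/* short, tight work only */
	kunmap_atomic(vaddr, KM_USER0);	/* re-enables pagefaults */
}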

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
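
/*
 * Usage sketch (illustrative, not part of the original file): reading one
 * word from a raw page frame that may have no struct page behind it, e.g.
 * a frame handed over by firmware.  The pfn argument is hypothetical.
 */
static u32 example_peek_pfn(unsigned long pfn)
{
	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *(u32 *)vaddr;

	kunmap_atomic(vaddr, KM_USER0);	/* same teardown as kmap_atomic() */
	return val;
}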

/*
 * Translate an address created by kmap_atomic() back to its struct page.
 * Addresses below the fixmap are lowmem and handled by virt_to_page().
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}