highmem.c

#include <linux/config.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
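
/*
 * Usage sketch (illustrative, not part of the original file): zeroing a
 * page that may live in highmem via the sleeping kmap()/kunmap() wrappers
 * around __kmap()/__kunmap() above.  zero_any_page() is a hypothetical
 * helper; it may only be called from process context, since kmap() can
 * sleep waiting for a free mapping slot:
 *
 *	void zero_any_page(struct page *page)
 *	{
 *		void *vaddr = kmap(page);	// may sleep
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */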

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.  (A usage sketch
 * follows __kunmap_atomic() below.)
 */
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}
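
/*
 * Usage sketch (illustrative, not part of the original file): copying one
 * highmem page into another with the atomic API.  copy_page_atomic() is a
 * hypothetical helper.  Nothing between kmap_atomic() and kunmap_atomic()
 * may sleep or fault, and the two mappings use distinct km_type slots
 * (KM_USER0, KM_USER1) so they land on different fixmap entries:
 *
 *	void copy_page_atomic(struct page *dst, struct page *src)
 *	{
 *		void *vto = kmap_atomic(dst, KM_USER0);
 *		void *vfrom = kmap_atomic(src, KM_USER1);
 *
 *		memcpy(vto, vfrom, PAGE_SIZE);
 *		kunmap_atomic(vfrom, KM_USER1);
 *		kunmap_atomic(vto, KM_USER0);
 *	}
 */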

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}
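
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * word from a physical frame that has no struct page, e.g. a reserved
 * region whose PFN was obtained from firmware.  read_phys_word() and its
 * parameters are hypothetical; the mapping is released with the matching
 * kunmap_atomic() on the returned virtual address:
 *
 *	u32 read_phys_word(unsigned long pfn, unsigned long offset)
 *	{
 *		void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *		u32 val = *(u32 *)(vaddr + (offset & ~PAGE_MASK));
 *
 *		kunmap_atomic(vaddr, KM_USER0);
 *		return val;
 *	}
 */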

struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);