/* highmem.c */

#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

/*
 * Map a (possibly highmem) page into the kernel's virtual address space.
 * May sleep, so it must not be called from interrupt context; lowmem
 * pages are permanently mapped and their address is returned directly.
 */
void *__kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
/*
 * Undo a mapping established by __kmap().  Must not be called from
 * interrupt context.
 */
void __kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
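/*
 * Illustrative sketch (not part of the original file): typical use of the
 * kmap()/kunmap() pair that wraps these functions.  The alloc_page() call
 * and the GFP_HIGHUSER flag are assumptions for the example; error
 * handling is omitted.
 *
 *      struct page *page = alloc_page(GFP_HIGHUSER);
 *      char *vaddr;
 *
 *      vaddr = kmap(page);             // may sleep; never from interrupts
 *      memset(vaddr, 0, PAGE_SIZE);    // the page is addressable here
 *      kunmap(page);                   // vaddr must not be used past this
 *      __free_page(page);
 */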
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *__kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte - idx)))
                BUG();
#endif
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void *)vaddr;
}
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
                BUG();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte - idx);
        local_flush_tlb_one(vaddr);
#endif
        pagefault_enable();
}
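/*
 * Illustrative sketch (not from the original source): an atomic kmap used
 * for a short, non-sleeping copy.  KM_USER0 is one of the per-CPU slots
 * from enum km_type; nothing between the map and the unmap may sleep.
 * The destination buffer is assumed to be an ordinary lowmem buffer.
 *
 *      void *vaddr = kmap_atomic(page, KM_USER0);
 *      memcpy(buffer, vaddr, PAGE_SIZE);       // no sleeping in here
 *      kunmap_atomic(vaddr, KM_USER0);
 */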
#ifndef CONFIG_LIMITED_DMA
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
        flush_tlb_one(vaddr);

        return (void *)vaddr;
}
#endif /* CONFIG_LIMITED_DMA */
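/*
 * Illustrative sketch (an assumption, not from the original source): mapping
 * a frame that has no struct page, e.g. a reserved region known only by its
 * page frame number.  The pfn value is a made-up placeholder, and KM_PTE0 is
 * simply one available km_type slot.
 *
 *      void *vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
 *      u32 val = *(u32 *)vaddr;        // short, atomic access only
 *      kunmap_atomic(vaddr, KM_PTE0);
 */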
/*
 * Translate an atomic-kmap virtual address back to its struct page.
 * Addresses below the fixmap area are ordinary lowmem mappings and go
 * through virt_to_page() directly.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
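/*
 * Illustrative sketch: recovering the struct page behind an atomic kmap,
 * via the kmap_atomic_to_page() wrapper (assumed here) around the function
 * above.
 *
 *      void *vaddr = kmap_atomic(page, KM_USER0);
 *      struct page *p = kmap_atomic_to_page(vaddr);    // p == page
 *      kunmap_atomic(vaddr, KM_USER0);
 */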
EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);