highmem.c

#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();			/* kmap_high() may block waiting for a slot */
	if (!PageHighMem(page))
		return page_address(page);	/* lowmem is permanently mapped */
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();			/* kmap/kunmap are process-context only */
	if (!PageHighMem(page))
		return;			/* nothing was mapped for lowmem */
	kunmap_high(page);
}
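
/*
 * A minimal usage sketch, not part of the original file: copying data out
 * of a (possibly highmem) page with kmap()/kunmap(). The helper name and
 * its parameters are hypothetical; memcpy() assumes <linux/string.h>.
 * Because kmap() may sleep, this is only valid in process context.
 */
static void copy_from_page_example(struct page *page, void *dst, size_t len)
{
	char *vaddr = kmap(page);	/* may sleep on a highmem page */

	memcpy(dst, vaddr, len);	/* len must not exceed PAGE_SIZE */
	kunmap(page);			/* release the mapping */
}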
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();	/* per-CPU fixmap slot */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx)))
		BUG();					/* slot is already in use */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

	return (void*) vaddr;
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
		/* lowmem address: kmap_atomic() took the page_address() path */
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();		/* address does not match this CPU's slot */
#endif
	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it. Keeping stale mappings around is also
	 * a bad idea, in case the page changes cacheability attributes or
	 * becomes a protected page in a hypervisor.
	 */
	kpte_clear_flush(kmap_pte-idx, vaddr);

	dec_preempt_count();
	preempt_check_resched();
}
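
/*
 * A minimal usage sketch, not part of the original file: zeroing a page
 * through an atomic kmap. The helper name is hypothetical and KM_USER0 is
 * just an example slot; memset() assumes <linux/string.h>. No sleeping is
 * allowed between kmap_atomic() and kunmap_atomic().
 */
static void zero_page_example(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables preemption */

	memset(vaddr, 0, PAGE_SIZE);			/* short, non-sleeping work */
	kunmap_atomic(vaddr, KM_USER0);			/* re-enables preemption */
}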
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));	/* map the raw frame */

	return (void*) vaddr;
}
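
/*
 * A minimal usage sketch, not part of the original file: reading one byte
 * from a raw page frame that may lack a struct page (e.g. a reserved
 * region). The helper name is hypothetical, the pfn is assumed valid, and
 * KM_USER0 is again only an example slot.
 */
static unsigned char peek_pfn_byte_example(unsigned long pfn, unsigned int off)
{
	unsigned char *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned char val = vaddr[off & (PAGE_SIZE - 1)];	/* stay within the page */

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}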
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);	/* ordinary lowmem address */

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);			/* page behind the fixmap pte */
}
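
/*
 * A minimal usage sketch, not part of the original file: recovering the
 * pfn behind an address returned by kmap_atomic(). The helper name is
 * hypothetical; for lowmem addresses kmap_atomic_to_page() simply falls
 * back to virt_to_page().
 */
static unsigned long kmap_addr_to_pfn_example(void *kvaddr)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	return page_to_pfn(page);	/* frame number backing the mapping */
}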
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);