highmem_32.c

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
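
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): copying a possibly-highmem page into a buffer from process
 * context. The function name and the "buf" argument are hypothetical.
 *
 *	static void copy_highpage_to_buf(struct page *page, void *buf)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memcpy(buf, vaddr, PAGE_SIZE);
 *		kunmap(page);
 *	}
 *
 * kmap() may sleep (note the might_sleep() above), so this pattern is
 * valid only in process context; atomic paths must use kmap_atomic().
 */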

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);
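
/*
 * Illustrative usage sketch (editorial addition): zeroing a highmem page
 * from a non-sleepable path, via the kmap_atomic() wrapper declared in
 * linux/highmem.h.
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * Pagefaults are disabled between the two calls, so nothing in between
 * may sleep. Note that kunmap_atomic() takes the returned virtual
 * address, not the struct page as kunmap() does.
 */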

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
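
/*
 * Illustrative sketch (editorial addition): mapping a frame that has no
 * struct page, such as a device aperture. "aperture_base" is a
 * hypothetical physical address; kmap_atomic_prot_pfn() itself is
 * defined elsewhere in the x86 mm code.
 *
 *	void *vaddr = kmap_atomic_pfn(aperture_base >> PAGE_SHIFT);
 *
 *	... access the mapping ...
 *	kunmap_atomic(vaddr);
 */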

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}