highmem_32.c

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
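
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pairs kmap() with kunmap() around a short region of work.
 * Because kmap() may sleep (note the might_sleep() above), this pattern
 * is only valid in process context. zero_highpage_sketch() and its
 * argument are hypothetical names.
 */
static void zero_highpage_sketch(struct page *page)
{
	void *vaddr = kmap(page);	/* highmem pages get a pool mapping */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* no-op for lowmem pages */
}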

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it. Keeping stale mappings around is a bad
	 * idea also, in case the page changes cacheability attributes or
	 * becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
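
/*
 * Illustrative usage sketch (not part of the original file): per the
 * comment above, an atomic kmap must be dropped without sleeping in
 * between, since pagefault_disable() is in effect and the mapping slot
 * is per-CPU. copy_from_highpage_sketch() and its arguments are
 * hypothetical; KM_USER0 is one of the standard km_type slots.
 */
static void copy_from_highpage_sketch(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);		/* no sleeping allowed in here */
	kunmap_atomic(src, KM_USER0);	/* must pass the same slot back */
}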

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
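
/*
 * Illustrative usage sketch (not part of the original file): because
 * kmap_atomic_pfn() takes a raw page frame number, it can map frames
 * with no struct page behind them (e.g. device apertures). The pairing
 * rules are the same as for kmap_atomic(); peek_pfn_sketch() is a
 * hypothetical name.
 */
static u32 peek_pfn_sketch(unsigned long pfn)
{
	u32 *p = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *p;

	kunmap_atomic(p, KM_USER0);
	return val;
}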

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
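
/*
 * Illustrative usage sketch (not part of the original file):
 * kmap_atomic_to_page() inverts an atomic mapping by reading the fixmap
 * pte back, and falls through to virt_to_page() for lowmem addresses,
 * so the round trip below should always hold. roundtrip_sketch() is a
 * hypothetical name.
 */
static void roundtrip_sketch(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(kmap_atomic_to_page(vaddr) != page);
	kunmap_atomic(vaddr, KM_USER0);
}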

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}