/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
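
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * the caller must not sleep between kmap_atomic() and kunmap_atomic(),
 * and must pass the same km_type slot to both calls.  KM_USER0 is one
 * of the per-CPU slots from this era's enum km_type; the function name
 * and parameters below are hypothetical.
 *
 *	static void copy_from_high_page(struct page *page, void *dst,
 *					size_t len)
 *	{
 *		void *src = kmap_atomic(page, KM_USER0);
 *
 *		memcpy(dst, src, len);
 *		kunmap_atomic(src, KM_USER0);
 *	}
 */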

#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
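
/*
 * A worked sketch of the slot arithmetic above (the exact macro
 * definition is an assumption): each CPU owns KM_TYPE_NR consecutive
 * fixmap slots, so CPU n using slot t gets idx = t + KM_TYPE_NR*n, a
 * fixmap entry private to that CPU.  Assuming the common definition
 * __fix_to_virt(x) == FIXADDR_TOP - ((x) << PAGE_SHIFT), the mapping
 * lands (FIX_KMAP_BEGIN + idx) pages below FIXADDR_TOP.  No lock is
 * needed: the slot cannot be taken by another CPU, and
 * pagefault_disable() keeps this context from migrating off it.
 */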

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
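
/*
 * Address-range summary for the lookup above (boundary names are from
 * the sparc32 headers; treat the ordering as an assumption of this
 * note): plain lowmem pointers below SRMMU_NOCACHE_VADDR resolve via
 * virt_to_page(), pointers in the SRMMU nocache pool (below
 * PKMAP_BASE) resolve through the pool's physical offset, and anything
 * remaining must be a fixmap kmap slot, whose backing page is read
 * back out of the kmap pte array.
 */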