/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h.  These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text.  Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and do not try to sleep
 * with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 * An illustrative usage sketch follows the includes below.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
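
/*
 * Illustrative only, compiled out: a minimal sketch of the calling
 * discipline described above, not part of this file's interface.
 * copy_from_high() is a hypothetical helper; KM_USER0 is one of the
 * per-CPU slot types from enum km_type.  The pair must nest like a
 * spinlock: no sleeping between kmap_atomic() and kunmap_atomic().
 */
#if 0
static void copy_from_high(struct page *page, void *dst, size_t len)
{
        /* Map the page into this CPU's KM_USER0 fixmap slot. */
        char *src = kmap_atomic(page, KM_USER0);

        memcpy(dst, src, len);

        /* Release on the same CPU, with the same slot type. */
        kunmap_atomic(src, KM_USER0);
}
#endif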

void *kmap_atomic(struct page *page, enum km_type type)
{
        unsigned long idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();
        if (!PageHighMem(page))
                return page_address(page);
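
        /*
         * Each CPU owns KM_TYPE_NR consecutive fixmap slots, so the
         * slot computed below is private to this CPU: e.g. on CPU 1,
         * KM_USER0 resolves to index KM_USER0 + KM_TYPE_NR.
         */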
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif
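
        /*
         * Note on the XXX above: flush_cache_all() is a stopgap; the
         * disabled __flush_cache_one(vaddr) appears intended to flush
         * only the single mapping being established.
         */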
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif

        return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
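        /*
         * Without CONFIG_DEBUG_HIGHMEM this only undoes the preempt
         * accounting taken in kmap_atomic(); the stale pte is simply
         * overwritten the next time this CPU reuses the slot.
         */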
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { /* FIXME: catches non-highmem addresses */
                dec_preempt_count();
                preempt_check_resched();
                return;
        }

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
#if 0
        __flush_cache_one(vaddr);
#else
        flush_cache_all();
#endif

        /*
         * force other mappings to Oops if they'll try to access
         * this pte without first remapping it
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
#if 0
        __flush_tlb_one(vaddr);
#else
        flush_tlb_all();
#endif
#endif

        dec_preempt_count();
        preempt_check_resched();
}

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;
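
        /*
         * Decode the address by region: the normal linear map below
         * the SRMMU nocache pool, the nocache pool itself, and
         * finally the fixmap window used by kmap_atomic().
         */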
        if (vaddr < SRMMU_NOCACHE_VADDR)
                return virt_to_page(ptr);
        if (vaddr < PKMAP_BASE)
                return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
        BUG_ON(vaddr < FIXADDR_START);
        BUG_ON(vaddr > FIXADDR_TOP);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}