/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	september 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

#include "mm.h"
  19. void *kmap(struct page *page)
  20. {
  21. might_sleep();
  22. if (!PageHighMem(page))
  23. return page_address(page);
  24. return kmap_high(page);
  25. }
  26. EXPORT_SYMBOL(kmap);
  27. void kunmap(struct page *page)
  28. {
  29. BUG_ON(in_interrupt());
  30. if (!PageHighMem(page))
  31. return;
  32. kunmap_high(page);
  33. }
  34. EXPORT_SYMBOL(kunmap);
/*
 * Atomically map @page at the per-CPU fixmap slot reserved for @type.
 * Never sleeps; must be paired with kunmap_atomic() on the returned
 * address.  Lowmem pages short-circuit to their linear address.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	/* No page faults may occur while the atomic mapping is live. */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/*
	 * If the page already has a pkmap mapping from kmap_high(),
	 * reuse it (kmap_high_get takes a reference) rather than
	 * consuming a fixmap slot.
	 */
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	/* Pick the per-CPU, per-type fixmap slot. */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
/*
 * Tear down a kmap_atomic() mapping.  Three origins are handled:
 *  - a fixmap slot (kvaddr >= FIXADDR_START): flush the data cache
 *    and, under DEBUG_HIGHMEM, clear the PTE so stale reuse is caught;
 *  - a pkmap address obtained via kmap_high_get(): drop its reference
 *    with kunmap_high();
 *  - a lowmem page_address(): nothing to undo.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		/* Write back dirty lines before the mapping disappears. */
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		/* The address must match the slot implied by @type. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx; /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	/* Re-enable the faults disabled in kmap_atomic(). */
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
/*
 * Like kmap_atomic(), but maps a raw page frame number rather than a
 * struct page — useful for frames with no struct page backing.  The
 * returned address is released with kunmap_atomic().
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	/* No page faults may occur while the atomic mapping is live. */
	pagefault_disable();

	/* Per-CPU, per-type fixmap slot. */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must have been cleared by a prior kunmap_atomic(). */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	/* Make sure the TLB picks up the new mapping. */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
  99. struct page *kmap_atomic_to_page(const void *ptr)
  100. {
  101. unsigned long vaddr = (unsigned long)ptr;
  102. pte_t *pte;
  103. if (vaddr < FIXADDR_START)
  104. return virt_to_page(ptr);
  105. pte = TOP_PTE(vaddr);
  106. return pte_page(*pte);
  107. }