arch/arm/mm/highmem.c
  1. /*
  2. * arch/arm/mm/highmem.c -- ARM highmem support
  3. *
  4. * Author: Nicolas Pitre
  5. * Created: September 8, 2008
  6. * Copyright: Marvell Semiconductors Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/highmem.h>
  14. #include <linux/interrupt.h>
  15. #include <asm/fixmap.h>
  16. #include <asm/cacheflush.h>
  17. #include <asm/tlbflush.h>
  18. #include "mm.h"
  19. void *kmap(struct page *page)
  20. {
  21. might_sleep();
  22. if (!PageHighMem(page))
  23. return page_address(page);
  24. return kmap_high(page);
  25. }
  26. EXPORT_SYMBOL(kmap);
  27. void kunmap(struct page *page)
  28. {
  29. BUG_ON(in_interrupt());
  30. if (!PageHighMem(page))
  31. return;
  32. kunmap_high(page);
  33. }
  34. EXPORT_SYMBOL(kunmap);
/*
 * kmap_atomic - establish a temporary per-CPU kernel mapping for a page.
 *
 * Disables pagefaults, so the caller must not sleep until the matching
 * kunmap_atomic().  Lowmem pages are returned directly via their
 * permanent mapping; highmem pages are mapped into the fixmap slot
 * selected by (type, current CPU).
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	/* Atomic kmaps must not fault/sleep until kunmap_atomic(). */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Each CPU owns a private window of KM_TYPE_NR fixmap slots. */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	/* Install the new mapping; flush must come after this write. */
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
/*
 * kunmap_atomic - tear down a mapping created by kmap_atomic().
 *
 * Only addresses inside the fixmap area need any work; lowmem
 * addresses returned by kmap_atomic() for non-highmem pages fall
 * below FIXADDR_START and are skipped.  Re-enables pagefaults in
 * either case.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		/*
		 * Write back the page's cache lines before the virtual
		 * mapping disappears (presumably needed for VIVT caches
		 * where dirty data is tagged by this vaddr — confirm
		 * against arch cache documentation).
		 */
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		/* Verify the address matches this CPU's slot for 'type'. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		/* Clear the PTE so stale use of the slot faults loudly. */
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx; /* to kill a warning */
#endif
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
/*
 * kmap_atomic_pfn - like kmap_atomic(), but takes a raw page frame
 * number instead of a struct page.  Useful when no struct page exists
 * for the frame (e.g. memory not managed by the page allocator —
 * TODO confirm intended callers).
 *
 * Disables pagefaults; pair with kunmap_atomic() on the returned
 * address.  Note: unconditionally maps through the fixmap, with no
 * lowmem shortcut.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	/* Per-CPU fixmap slot, same layout as kmap_atomic(). */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must have been cleared by the previous kunmap_atomic(). */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	/* Install mapping, then make the TLB see it. */
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
  92. struct page *kmap_atomic_to_page(const void *ptr)
  93. {
  94. unsigned long vaddr = (unsigned long)ptr;
  95. pte_t *pte;
  96. if (vaddr < FIXADDR_START)
  97. return virt_to_page(ptr);
  98. pte = TOP_PTE(vaddr);
  99. return pte_page(*pte);
  100. }