iomap_32.c

/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>
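
/*
 * Non-PAE page table entries can only hold 32-bit physical addresses, so
 * anything above the 4GB boundary is unmappable; with PAE the wider
 * entries make the check unnecessary.
 */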
#ifdef CONFIG_X86_PAE
static int
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
        return 1;
}
#else
static int
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
        /* There is no way to map greater than 1 << 32 address without PAE */
        if (base + size > 0x100000000ULL)
                return 0;

        return 1;
}
#endif
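
/*
 * Reserve a write-combining memtype for the physical range
 * [base, base + size) and return the page protection to use for it in
 * *prot.  Non-PAT systems fall back to an uncached mapping; the
 * reservation is dropped and -EINVAL returned if the range cannot be
 * mapped or if only write-back is available for it.
 */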
int
reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
{
        unsigned long ret_flag;

        if (!is_io_mapping_possible(base, size))
                goto out_err;

        if (!pat_enabled) {
                /* No PAT means no write-combining; fall back to uncached */
                *prot = pgprot_noncached(PAGE_KERNEL);
                return 0;
        }

        if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
                goto out_err;

        /* Only write-back on offer for this range: give up rather than map it */
        if (ret_flag == _PAGE_CACHE_WB)
                goto out_free;

        /* Keep the kernel identity map's attributes in sync with ours */
        if (kernel_map_sync_memtype(base, size, ret_flag))
                goto out_free;

        *prot = __pgprot(__PAGE_KERNEL | ret_flag);
        return 0;

out_free:
        free_memtype(base, base + size);
out_err:
        return -EINVAL;
}
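
/*
 * Undo a reservation made by reserve_io_memtype_wc().  Non-PAT systems
 * reserved nothing, so there is nothing to free.
 */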
void
free_io_memtype(u64 base, unsigned long size)
{
        if (pat_enabled)
                free_memtype(base, base + size);
}

/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        /*
         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
         * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
         * MTRR is UC or WC.  UC_MINUS gets the real intention of the user,
         * which is "WC if the MTRR is WC, UC if you can't do that."
         */
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
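
/*
 * Tear down a mapping set up by iomap_atomic_prot_pfn() and re-enable
 * pagefaults.  Must run on the same CPU, with the same 'type', as the
 * matching map call.
 */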
void
iounmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.  Keeping stale mappings around is a
         * bad idea too, in case the page changes cacheability attributes
         * or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
                kpte_clear_flush(kmap_pte - idx, vaddr);

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
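
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants a temporary write-combined window onto a device aperture would
 * pair the helpers above roughly like this.  'bar_base', 'bar_size',
 * 'data' and 'copy_block_to_wc' are hypothetical placeholders.
 *
 *      pgprot_t prot;
 *      void *vaddr;
 *
 *      if (reserve_io_memtype_wc(bar_base, bar_size, &prot))
 *              return -EINVAL;
 *
 *      vaddr = iomap_atomic_prot_pfn(bar_base >> PAGE_SHIFT, KM_USER0, prot);
 *      copy_block_to_wc(vaddr, data, PAGE_SIZE);
 *      iounmap_atomic(vaddr, KM_USER0);
 *
 *      free_io_memtype(bar_base, bar_size);
 */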