/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
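	/* (e.g. phys_addr = 0xffffff00 with size = 0x200 would wrap last_addr) */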
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
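	/* (phys_to_virt() yields its P1SEG, i.e. cached identity-mapped, alias) */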
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
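	/* (virt_to_phys(high_memory) is the top of the directly-mapped RAM) */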
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
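	/*
	 * Worked example (assuming 4KiB pages): phys_addr = 0xfd000004 with
	 * size = 0x10 gives offset = 0x004, phys_addr rounded down to
	 * 0xfd000000, and size rounded up to a single 0x1000 byte page.
	 */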
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
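	/* (0x1000000 is 16MB, the smallest PMB entry size) */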
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

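	/* Uncached kernel mapping; callers can OR extra PTE bits in via flags */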
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);

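/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * generic ioremap()/iounmap() helpers declared in <asm/io.h> end up in
 * __ioremap()/__iounmap() above. The bus address and register layout
 * below are assumed values for illustration only.
 *
 *	void __iomem *regs = ioremap(0xfd000000, 0x100);
 *	if (regs) {
 *		u32 val = readl(regs);		// read a device register
 *		writel(val | 0x1, regs);	// write it back, assumed enable bit
 *		iounmap(regs);			// tear the mapping down again
 *	}
 */
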
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

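	/*
	 * P1/P2 direct-mapped addresses and direct PCI window addresses
	 * were handed out as-is by __ioremap(); no page tables were set
	 * up for them, so there is nothing to tear down here.
	 */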
	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);