/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
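
/*
 * The three helpers below walk the kernel page tables top-down:
 * remap_area_pages() steps through the PGD entries covering the region
 * (allocating the PUD and PMD levels on demand), remap_area_pmd()
 * steps through the PMDs within a single PGD entry, and
 * remap_area_pte() fills in the leaf PTEs with the caller-supplied
 * protection flags.
 */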
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
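
/*
 * Note the offset trick shared by remap_area_pmd() and
 * remap_area_pages(): once "phys_addr -= address" has been applied,
 * the physical/virtual delta stays constant across the walk, so each
 * step can recover the physical address for the current position as
 * (phys_addr + address) without maintaining a second cursor.
 */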
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;

		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	/* 0x1000000 is 16MB, the smallest PMB entry size */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

	if (likely(size))
		if (remap_area_pages(addr, phys_addr, size, flags)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
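
/*
 * A worked example of the alignment fixup above, with illustrative
 * numbers: for a request like __ioremap(base + 4, 16) that reaches the
 * page-table path (base being page-aligned), offset becomes 4,
 * phys_addr is rounded down to base, size is rounded up to one full
 * page, and the returned cookie is the vm area's base address plus 4,
 * so the caller's pointer lands exactly on the requested physical byte.
 */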

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Fixed P1/P2 translations and direct PCI window addresses were
	 * never put in the page tables, so there is nothing to unmap.
	 */
	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
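
/*
 * Typical driver-side use, as a sketch only: the names DEV_PHYS_BASE,
 * DEV_WINDOW_SIZE and DEV_CTRL are hypothetical stand-ins for a real
 * device's resources. A driver maps the register window once, pokes it
 * through the MMIO accessors, and tears the mapping down when done:
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, DEV_WINDOW_SIZE);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL);
 *	...
 *	iounmap(regs);
 *
 * On sh, the generic ioremap()/iounmap() entry points are thin
 * wrappers around the __ioremap()/__iounmap() pair defined here.
 */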