/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
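
/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically consume such a remapped region, using only the readl()/writel()
 * accessors as required above.  The physical base address, size and
 * register offsets are made-up placeholder values, not a real device.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(0xd0000000, SZ_4K);	(hypothetical device base)
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);		(hypothetical control register)
 *	status = readl(regs + 0x08);		(hypothetical status register)
 *	iounmap(regs);
 */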

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
               unsigned long phys_addr, pgprot_t pgprot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        BUG_ON(address >= end);
        do {
                if (!pte_none(*pte))
                        goto bad;

                set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
        return;

 bad:
        printk("remap_area_pte: page already exists\n");
        BUG();
}

static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
               unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        pgprot_t pgprot;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        phys_addr -= address;
        BUG_ON(address >= end);

        pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
        do {
                pte_t *pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long pfn,
                 unsigned long size, unsigned long flags)
{
        unsigned long address = start;
        unsigned long end = start + size;
        unsigned long phys_addr = __pfn_to_phys(pfn);
        int err = 0;
        pgd_t *dir;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        BUG_ON(address >= end);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        return err;
}

/*
 * Bring mm's copy of the vmalloc-area pgd entries back in sync with
 * init_mm, retrying if another update races with the copy.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.kvm_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.kvm_seq = seq;
        } while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~SZ_1M);
        pgd_t *pgd;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        do {
                pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

                pmd = *pmdp;
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the kvm sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
                        init_mm.context.kvm_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(pmd_page_vaddr(pmd));
                }

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
                __check_kvm_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    unsigned long size, unsigned long flags)
{
        unsigned long prot, addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
               (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

        /*
         * ARMv6 and above need XN set to prevent speculative prefetches
         * hitting IO.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6)
                prot |= PMD_SECT_XN;

        pgd = pgd_offset_k(addr);
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);

                pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         unsigned long size, unsigned long flags)
{
        unsigned long prot, addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
               PMD_DOMAIN(DOMAIN_IO) |
               (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

        /*
         * ARMv6 and above need XN set to prevent speculative prefetches
         * hitting IO.
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6)
                prot |= PMD_SECT_XN;

        pgd = pgd_offset_k(virt);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | prot;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd_t *pmd = pmd_offset(pgd, addr);

                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PGDIR_SIZE;
                        pgd++;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
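
/*
 * Worked example (illustrative; assumes the ARMv6 supersection descriptor
 * layout with PA[35:32] carried in descriptor bits [23:20], and a 4K
 * PAGE_SHIFT of 12): for pfn 0x110000, i.e. a 36-bit physical address of
 * 0x110000000, __pfn_to_phys(pfn) truncates to 0x10000000 in a 32-bit
 * unsigned long and supplies PA[31:24], while ((pfn >> 20) & 0xf) << 20
 * recovers PA[35:32] = 0x1 and places it in bits [23:20], so
 * super_pmd_val becomes 0x10100000 | prot.
 */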
#endif

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
              unsigned long flags)
{
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
        if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, flags);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, flags);
        } else
#endif
                err = remap_area_pages(addr, pfn, size, flags);

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Page align the mapping size
         */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
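
/*
 * Worked example (illustrative, using a made-up address): a call such as
 * __ioremap(0x40001004, 0x30, 0) computes offset = 0x4, pfn = 0x40001,
 * last_addr = 0x40001033 and size = PAGE_ALIGN(0x40001034) - 0x40001004 =
 * 0xffc, so __ioremap_pfn() maps the containing page starting at the page
 * boundary and returns the new virtual base plus the 0x4 offset, as
 * described in the NOTE above.
 */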

void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
        struct vm_struct **p, *tmp;
#endif
        unsigned int section_mapping = 0;

        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
        /*
         * If this is a section based mapping we need to handle it
         * specially as the VM subsystem does not know how to handle
         * such a beast. We need the lock here b/c we need to clear
         * all the mappings before the area can be reclaimed
         * by someone else.
         */
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
                                *p = tmp->next;
                                unmap_area_sections((unsigned long)tmp->addr,
                                                    tmp->size);
                                kfree(tmp);
                                section_mapping = 1;
                        }
                        break;
                }
        }
        write_unlock(&vmlist_lock);
#endif

        if (!section_mapping)
                vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);