/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

/*
 * map_io_page is currently only called by __ioremap.  It adds an entry
 * to the ioremap page table and an entry to the HPT, possibly bolting it.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (mem_init_done) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize)) {
			printk(KERN_ERR "Failed to create a bolted mapping "
			       "for IO memory at %016lx!\n", pa);
			return -ENOMEM;
		}
	}
	return 0;
}
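
/*
 * __ioremap_com maps 'size' bytes of physical memory starting at the
 * page-aligned address 'pa' to the virtual range starting at 'ea', one
 * page at a time.  If the caller supplied no protection bits, default
 * kernel page protections are used.  The returned cookie re-applies the
 * sub-page offset of the original (possibly unaligned) address 'addr'.
 */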
static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
				    unsigned long ea, unsigned long size,
				    unsigned long flags)
{
	unsigned long i;

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
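
/*
 * __ioremap picks the virtual address itself (see the comment in the
 * function body) and returns NULL if the range is empty, the physical
 * address is zero, or no virtual space / page-table memory is available.
 */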
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the imalloc system is
	 * running, we use it.  Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use the addresses from
	 * ioremap_bot through IMALLOC_END.
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if ((size == 0) || (pa == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;
	}
	return ret;
}
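
/*
 * ioremap maps device memory non-cacheable and guarded, which is what
 * MMIO accesses normally require; platforms can override the whole
 * operation through ppc_md.ioremap.
 */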
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}
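
/*
 * ioremap_flags is the same as ioremap except that the caller chooses
 * the page protection bits, e.g. to set up a cacheable mapping.
 */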
void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
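
/*
 * __ioremap_explicit maps a physical range at a caller-supplied virtual
 * address rather than allocating one.  Addresses and size must be page
 * aligned.  Note that it returns 0 on success and 1 on failure, not a
 * negative errno.
 */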
int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}
	return 0;
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX what about calls before mem_init_done (ie python_countermeasures())
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	im_free(addr);
}
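
/*
 * iounmap tears down a mapping created by ioremap, going through the
 * platform hook when one is registered.
 */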
void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
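
/*
 * Unmap every existing region that lies wholly inside [addr, addr + size).
 * Returns 0 if at least one such region was found, 1 if there were none.
 */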
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size, IM_REGION_SUPERSET);
	}

	return 0;
}
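
/*
 * __iounmap_explicit undoes an __ioremap_explicit.  The range must match
 * an existing imalloc region exactly or be a subset of one (in which case
 * the parent region is split); failing that, any subset regions inside
 * the range are unmapped instead.  Returns 0 on success, 1 on failure.
 */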
int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region.
	 */
	area = im_get_area(addr, size, IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
			       __FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
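
/*
 * Typical driver usage, as an illustrative sketch only (the physical
 * address and register offset below are made up):
 *
 *	void __iomem *regs = ioremap(0xf2000000UL, 0x1000);
 *	if (regs) {
 *		u32 status = in_be32(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 */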

/*
 * Carve virtual space for a PCI host bridge IO window out of the range
 * below IMALLOC_BASE.  This is a simple bump allocator; the space is
 * never given back.
 */
void __iomem * reserve_phb_iospace(unsigned long size)
{
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;
	phbs_io_bot += size;

	return virt_addr;
}