/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
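/*
 * Worked example (illustrative values only): __phys_addr() inverts the two
 * kernel virtual mappings on 64-bit. An address in the kernel-text mapping
 * is translated relative to __START_KERNEL_map plus the phys_base
 * relocation; anything else is assumed to sit in the direct mapping at
 * PAGE_OFFSET:
 *
 *	__phys_addr(__START_KERNEL_map + 0x1000) == phys_base + 0x1000
 *	__phys_addr(PAGE_OFFSET + 0x200000)      == 0x200000
 */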
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
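/*
 * Usage sketch (illustrative): callers pass a page frame number, not a
 * physical address, so a physical address must be shifted down first:
 *
 *	if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *		return NULL;		(refuse to alias normal RAM)
 *
 * __ioremap() below applies this check to every page it is asked to map.
 */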
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long paddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long vaddr = (unsigned long)__va(paddr);
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err, level;

	/* No change for pages after the last mapping */
	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
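/*
 * Background note: x86 forbids mapping the same physical page with
 * conflicting memory types (e.g. write-back through the direct mapping
 * and uncached through an ioremap alias); doing so can leave stale cache
 * lines behind what a driver believes is uncached memory. Keeping the
 * direct mapping's attribute in sync with the ioremap mapping avoids
 * that aliasing.
 */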
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
	     (offset << PAGE_SHIFT) < last_addr; offset++) {
		if (page_is_ram(offset))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, prot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
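/*
 * Worked example of the alignment fixup above (hypothetical numbers):
 * a request for phys_addr = 0x12345, size = 0x10 yields
 *
 *	offset    = 0x345	(phys_addr & ~PAGE_MASK)
 *	phys_addr = 0x12000	(phys_addr & PAGE_MASK)
 *	size      = 0x1000	(PAGE_ALIGN(0x12354 + 1) - 0x12000)
 *
 * so one whole page is mapped and the caller gets back the page-aligned
 * virtual address plus 0x345, as promised in the NOTE above.
 */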
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
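/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical driver maps an MMIO region once, accesses registers through
 * the readl/writel helpers, and unmaps on teardown. The base address
 * 0xfebf0000 and the register offset below are hypothetical.
 */
static int __maybe_unused ioremap_usage_sketch(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap_nocache(0xfebf0000UL, 0x1000);	/* hypothetical BAR */
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);		/* hypothetical status reg */
	writel(status | 0x1, regs + 0x04);

	iounmap(regs);
	return 0;
}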
void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
		__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* Re-establish the entries that are still present */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
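/*
 * Slot arithmetic, spelled out: fixmap indices grow downwards in virtual
 * address space, so nesting level n owns the NR_FIX_BTMAPS consecutive
 * slots starting at FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * n, which the loop
 * above walks with --idx. With, e.g., NR_FIX_BTMAPS == 4 and
 * FIX_BTMAPS_NESTING == 4, up to four nested early mappings of at most
 * four pages each can be live at once.
 */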
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
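/*
 * Usage sketch (illustrative, hypothetical address): boot-time code that
 * needs to peek at a firmware table before the real ioremap() is usable
 * maps it temporarily, and must balance every early_ioremap() with an
 * early_iounmap() of the same size.
 */
static void __init __maybe_unused early_ioremap_usage_sketch(void)
{
	void *p = early_ioremap(0x000f0000, 0x100);	/* hypothetical table */
	unsigned char first_byte;

	if (p) {
		first_byte = *(unsigned char *)p;
		(void)first_byte;	/* a real caller would parse it here */
		early_iounmap(p, 0x100);
	}
}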
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */