/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS      0xa0000
#define ISA_END_ADDRESS        0x100000

/*
 * Fill the PTEs under one PMD entry, one 4 KB page at a time; the range
 * is clamped so it never crosses a PMD boundary.
 */
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}
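
/*
 * Illustration (not from the original file; numbers are hypothetical):
 * called with address = 0x1000, size = 0x3000 and phys_addr = 0xe0001000,
 * the loop above installs three PTEs for pfns 0xe0001..0xe0003, stepping
 * the virtual and the physical side one 4 KB page at a time until
 * address reaches end.
 */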

/*
 * Allocate and fill the PTE tables for one PUD entry, delegating each
 * PMD-sized chunk to remap_area_pte().
 */
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

/*
 * Allocate and fill the PMD tables for one PGD entry, propagating any
 * allocation failure from the level below.
 */
static inline int remap_area_pud(pud_t *pud, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        return -ENOMEM;
                if (remap_area_pmd(pmd, address, end - address,
                                   address + phys_addr, flags))
                        return -ENOMEM;
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address && (address < end));
        return 0;
}

/*
 * Install mappings for [address, address + size) in the kernel page
 * tables, walking the four levels top-down under init_mm's lock.
 */
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, unsigned long flags)
{
        int error;
        pgd_t *pgd;
        unsigned long end = address + size;

        phys_addr -= address;
        pgd = pgd_offset_k(address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pud = pud_alloc(&init_mm, pgd, address);
                error = -ENOMEM;
                if (!pud)
                        break;
                if (remap_area_pud(pud, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                pgd++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}
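
/*
 * Walk-through (hypothetical numbers, not part of the original file):
 * for a 3 MB remap whose virtual range starts 2 MB-aligned, the loop
 * above makes a single pgd/pud pass, and remap_area_pmd() hands
 * remap_area_pte() a 2 MB chunk (512 PTEs) for the first pmd entry and
 * a 1 MB chunk (256 PTEs) for the second, because remap_area_pte()
 * clamps each pass to PMD_SIZE.
 */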

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                    unsigned long flags)
{
        int err = 0;

        if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vaddr = (unsigned long) __va(phys_addr);

                /*
                 * Must use an address here and not a struct page because
                 * the phys addr can be in a hole between nodes and not
                 * have a memmap entry.
                 */
                err = change_page_attr_addr(vaddr, npages,
                                            __pgprot(__PAGE_KERNEL|flags));
                if (!err)
                        global_flush_tlb();
        }
        return err;
}
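
/*
 * Sketch of the aliasing problem this solves; illustrative only, and
 * 'ram_phys' below is a made-up physical address that is backed by RAM
 * (i.e. below end_pfn_map):
 */
#if 0
        void __iomem *p = __ioremap(ram_phys, PAGE_SIZE, _PAGE_PCD);
        /*
         * Without ioremap_change_attr() the page at ram_phys would now be
         * visible uncached through p but still cached through the linear
         * mapping at __va(ram_phys) - two conflicting views of one
         * physical page. Rewriting the direct-mapping PTEs keeps the two
         * views consistent.
         */
#endif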

/*
 * Generic mapping function.
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (last_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);
                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        /* stash the mapping's cache attributes in the high vm_struct flag bits */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
                return NULL;
        }
        if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
                /* clear the stashed attribute bits so iounmap won't re-fix them */
                area->flags &= 0xffffff;
                vunmap(addr);
                return NULL;
        }
        return (__force void __iomem *) (offset + (char *)addr);
}
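
/*
 * Worked example (hypothetical numbers, not from the original file):
 * __ioremap(0xfebc1234, 0x100, 0) computes offset = 0x234, rounds
 * phys_addr down to 0xfebc1000 and size up to 0x1000 (one 4 KB page),
 * maps that single page, and returns area->addr + 0x234 so the caller
 * sees its original, unaligned address.
 */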

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_PCD);
}
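
/*
 * Minimal usage sketch (not part of this file): assumes a PCI driver
 * whose device 'pdev' exposes control registers in BAR 0; the 0x10
 * register offset is made up.
 */
#if 0
        void __iomem *regs;

        regs = ioremap_nocache(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
        if (!regs)
                return -ENOMEM;
        writel(1, regs + 0x10);         /* hypothetical reset register */
        iounmap(regs);
#endif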

void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p;

        /* never unmap the direct mapping or the fixed low ISA window */
        if ((void __force *)addr <= high_memory)
                return;
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        write_lock(&vmlist_lock);
        p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
        if (!p)
                printk("iounmap: bad address %p\n", addr);
        else if (p->flags >> 20)
                /* attribute bits were stashed by __ioremap: restore caching */
                ioremap_change_attr(p->phys_addr, p->size, 0);
        write_unlock(&vmlist_lock);
        kfree(p);
}