
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
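
/*
 * Fill a single PTE table with I/O mappings for [address, address + size),
 * clamped to one PMD entry.  Every slot must be unused on entry.
 */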
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ  |
				   _PAGE_WRITE    | _PAGE_DIRTY |
				   _PAGE_ACCESSED | _PAGE_SHARED | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();

	pfn = phys_addr >> PAGE_SHIFT;

	pr_debug("%s: pte %p address %lx size %lx phys_addr %lx\n",
		 __FUNCTION__, pte, address, size, phys_addr);

	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
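
/*
 * Walk the PTE tables under one PGD entry, allocating them as needed.
 */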
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();

	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}
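
/*
 * Top-level page table walk: create a kernel mapping of
 * [phys_addr, phys_addr + size) at the given virtual address.
 */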
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();

	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();

	return error;
}

/*
 * Generic mapping function (not visible outside):
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *)(offset + (char *)addr);
}
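
/*
 * A minimal usage sketch; the device address, size and flags below are
 * made up for illustration:
 *
 *	void *regs = __ioremap(0xfe240000UL, 0x100, 0);
 *	if (regs) {
 *		... memory-mapped accesses through regs ...
 *		iounmap(regs);
 *	}
 */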

void iounmap(void *addr)
{
	struct vm_struct *area;

	/*
	 * remove_vm_area() tears down the mapping and hands back the
	 * vm_struct; freeing it here completes the teardown.
	 */
	area = remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
	if (!area) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(area);
}

static struct resource shmedia_iomap = {
	.name	= "shmedia_iomap",
	.start	= IOBASE_VADDR + PAGE_SIZE,
	.end	= IOBASE_END - 1,
};

static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);

/*
 * We have the same problem as the SPARC, so let's have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before kmalloc is available.
 */

#define XNMLN	15
#define XNRES	10

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
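
/*
 * Hand out entries from the static pool until it is exhausted;
 * callers fall back to kmalloc() after that.
 */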
static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}

	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}
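
/*
 * Find the child resource of 'root' that contains the given virtual
 * address, if any.
 */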
static struct resource *shmedia_find_resource(struct resource *root,
					      unsigned long vaddr)
{
	struct resource *res;

	for (res = root->child; res; res = res->sibling)
		if (res->start <= vaddr && res->end >= vaddr)
			return res;

	return NULL;
}
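
/*
 * Allocate a struct resource (from the static pool when possible, from
 * kmalloc() once that runs dry) and map 'size' bytes at 'phys'.
 */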
static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name)
{
	static int printed_full = 0;
	struct xresource *xres;
	struct resource *res;
	char *tack;

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("%s: done with statics, switching to kmalloc\n",
			       __FUNCTION__);
			printed_full = 1;
		}
		/*
		 * The name is capped at XNMLN characters below, so size
		 * the buffer for the cap rather than for strlen(name).
		 */
		tack = kmalloc(sizeof(struct resource) + XNMLN + 1, GFP_KERNEL);
		if (!tack)
			return -ENOMEM;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *)tack;
		tack += sizeof(struct resource);
	}

	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size);
}
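
/*
 * Claim a page-aligned window in the shmedia_iomap resource and wire up
 * the PTEs for it one page at a time; returns the virtual address
 * corresponding to 'pa', including the offset within the first page.
 */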
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long)pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		      (res->name != NULL) ? res->name : "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	res->start += offset;
	res->end = res->start + sz - 1;	/* not strictly necessary.. */

	return res->start;
}
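
/*
 * Undo shmedia_ioremap(): tear down the PTEs page by page and release
 * the resource.
 */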
static void shmedia_free_io(struct resource *res)
{
	unsigned long len = res->end - res->start + 1;

	BUG_ON((len & (PAGE_SIZE - 1)) != 0);

	while (len) {
		len -= PAGE_SIZE;
		shmedia_unmapioaddr(res->start + len);
	}

	release_resource(res);
}
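
/*
 * Fetch a zeroed page for a page table, from the bootmem allocator
 * early in boot and from the page allocator afterwards.
 */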
static void *sh64_get_page(void)
{
	extern int after_bootmem;
	void *page;

	if (after_bootmem)
		page = (void *)get_zeroed_page(GFP_ATOMIC);
	else
		page = alloc_bootmem_pages(PAGE_SIZE);

	if (!page || ((unsigned long)page & ~PAGE_MASK))
		panic("sh64_get_page: Out of memory already?\n");

	return page;
}
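
/*
 * Install a single kernel PTE mapping 'va' to 'pa', building the
 * intermediate page table levels on demand.
 */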
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep | _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	/* flush_tlb_kernel_range() takes an end address, not a size. */
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);

	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	/* Clearing the PTE itself is all that is needed here. */
	pte_clear(&init_mm, vaddr, ptep);
}
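
/*
 * Map an on-chip peripheral; mappings are rounded up to at least one
 * page.  Returns the virtual address corresponding to 'phys'.
 */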
unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
{
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return shmedia_alloc_io(phys, size, name);
}
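
/*
 * Release a mapping made by onchip_remap(), returning its resource to
 * the static pool or to kmalloc as appropriate.
 */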
void onchip_unmap(unsigned long vaddr)
{
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
	       res->name, psz, psz == 1 ? " " : "s");

	shmedia_free_io(res);

	if ((char *)res >= (char *)xresv &&
	    (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}

#ifdef CONFIG_PROC_FS
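/*
 * /proc read handler: emit one "start-end: name" line per mapping in
 * the resource tree passed via 'data'.
 */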
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL)
			nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p - buf;
}
#endif /* CONFIG_PROC_FS */

static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
			       &shmedia_iomap);
#endif
	return 0;
}

__initcall(register_proc_onchip);