/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);
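
/*
 * Fill one PTE-level table with mappings for [address, address + size)
 * within a single PMD.  The entries must currently be unused; finding a
 * populated PTE here is a bug.
 */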
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ  |
				   _PAGE_WRITE    | _PAGE_DIRTY |
				   _PAGE_ACCESSED | _PAGE_SHARED | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();

	pfn = phys_addr >> PAGE_SHIFT;

	pr_debug("%s: pte %p address %lx size %lx phys_addr %lx\n",
		 __FUNCTION__, pte, address, size, phys_addr);

	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}

		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
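
/*
 * Walk the PMD entries covering [address, address + size) within one
 * PGD, allocating PTE tables as needed and filling them through
 * remap_area_pte().
 */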
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;

	if (address >= end)
		BUG();

	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}
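
/*
 * Top level of the page table walk: map size bytes of physical memory
 * at phys_addr into the kernel virtual range starting at address.
 */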
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();

	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();

	/* Return the error, not 0, so that __ioremap() can unmap on failure. */
	return error;
}

/*
 * Generic mapping function (not visible outside):
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size,
		unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)	/* check before touching area->addr */
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *)(offset + (char *)addr);
}
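
/*
 * Tear down a mapping created by __ioremap().  remove_vm_area() unmaps
 * the pages and detaches the vm_struct, which we then free.
 */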
void iounmap(void *addr)
{
	struct vm_struct *area;

	/*
	 * Don't vfree() here as well; freeing the area twice would leave
	 * remove_vm_area() with nothing to find.
	 */
	area = remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
	if (!area) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(area);
}
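
/*
 * Resource tree covering the fixed on-chip I/O window.  On-chip
 * remappings, including the early pre-kmalloc ones, are carved out of
 * this range by the mini-allocator below.
 */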
static struct resource shmedia_iomap = {
	.name	= "shmedia_iomap",
	.start	= IOBASE_VADDR + PAGE_SIZE,
	.end	= IOBASE_END - 1,
};

/*
 * We have the same problem as the SPARC, so let's have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */
#define XNMLN	15
#define XNRES	10

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
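
/*
 * Grab a free slot from the static xresource pool, or return NULL if
 * every slot is in use.
 */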
static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}

	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}
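
/*
 * Find the child resource of @root that contains the virtual
 * address @vaddr.
 */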
static struct resource *shmedia_find_resource(struct resource *root,
					      unsigned long vaddr)
{
	struct resource *res;

	for (res = root->child; res; res = res->sibling)
		if (res->start <= vaddr && res->end >= vaddr)
			return res;

	return NULL;
}
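
/*
 * Allocate a resource describing an I/O mapping: use a slot from the
 * static pool while kmalloc is unavailable, fall back to kmalloc once
 * it is, then hand the resource to shmedia_ioremap() to establish the
 * mapping.
 */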
static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name)
{
	static int printed_full = 0;
	struct xresource *xres;
	struct resource *res;
	char *tack;

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("%s: done with statics, switching to kmalloc\n",
			       __FUNCTION__);
			printed_full = 1;
		}

		/*
		 * Reserve XNMLN+1 bytes for the name up front: strncpy()
		 * below pads out to XNMLN bytes regardless of how short
		 * the name is, so sizing the buffer by strlen(name) would
		 * overflow for short names.
		 */
		tack = kmalloc(sizeof(struct resource) + XNMLN + 1, GFP_KERNEL);
		if (!tack)
			return -ENOMEM;

		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *)tack;
		tack += sizeof(struct resource);
	}

	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size);
}
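
/*
 * Carve a page-aligned range out of the shmedia_iomap window for @res
 * and map it to physical address @pa, one page at a time.  Returns the
 * virtual address corresponding to @pa, including the sub-page offset.
 */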
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long)pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		      (res->name != NULL) ? res->name : "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	res->start += offset;
	res->end = res->start + sz - 1;	/* not strictly necessary.. */

	return res->start;
}
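
/*
 * Unmap every page covered by @res and return the range to the
 * shmedia_iomap resource tree.
 */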
static void shmedia_free_io(struct resource *res)
{
	unsigned long len = res->end - res->start + 1;

	BUG_ON((len & (PAGE_SIZE - 1)) != 0);

	while (len) {
		len -= PAGE_SIZE;
		shmedia_unmapioaddr(res->start + len);
	}

	release_resource(res);
}
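
/*
 * Get a zeroed page for page table use: from the bootmem allocator
 * before the page allocator is up, from the buddy allocator after.
 */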
static void *sh64_get_page(void)
{
	extern int after_bootmem;
	void *page;

	if (after_bootmem)
		page = (void *)get_zeroed_page(GFP_ATOMIC);
	else
		page = alloc_bootmem_pages(PAGE_SIZE);

	if (!page || ((unsigned long)page & ~PAGE_MASK))
		panic("sh64_get_page: Out of memory already?\n");

	return page;
}
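
/*
 * Establish a single-page device mapping of physical address @pa at
 * virtual address @va, allocating intermediate page tables as needed.
 */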
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep | _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	/* flush_tlb_kernel_range() takes a start/end pair, not a length */
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
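
/*
 * Remove the single-page mapping at @vaddr, if one exists.
 */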
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);

	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	/*
	 * Clear only this entry; zeroing the whole PTE page here would
	 * wipe out the neighbouring mappings as well.
	 */
	pte_clear(&init_mm, vaddr, ptep);
}
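
/*
 * Public entry point: map @size bytes of on-chip register space at
 * physical address @phys, tagged with @name in the resource tree.
 */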
unsigned long onchip_remap(unsigned long phys, unsigned long size,
			   const char *name)
{
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return shmedia_alloc_io(phys, size, name);
}
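
/*
 * Undo an onchip_remap(): look the mapping up by virtual address,
 * unmap it, and release its resource (static slot or kmalloc'd).
 */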
void onchip_unmap(unsigned long vaddr)
{
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
	       res->name, psz, psz == 1 ? " " : "s");

	shmedia_free_io(res);

	if ((char *)res >= (char *)xresv &&
	    (char *)res <  (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
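
/*
 * /proc read handler: dump the currently claimed ranges in the
 * shmedia_iomap resource tree, one "start-end: name" line per mapping.
 */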
#ifdef CONFIG_PROC_FS
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL)
			nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n",
			     (unsigned long)r->start,
			     (unsigned long)r->end, nm);
	}

	return p - buf;
}
#endif /* CONFIG_PROC_FS */

static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
			       &shmedia_iomap);
#endif
	return 0;
}

__initcall(register_proc_onchip);