/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
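
/*
 * Wired section mappings established at boot: the first 448MB of
 * physical memory is mapped twice, cached via P1 (0x80000000+) and
 * uncached/write-through via P2 (0xa0000000+). These occupy the low
 * PMB slots and are never torn down -- see the guard at the top of
 * clear_pmb_entry().
 */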
static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
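
/*
 * Each PMB entry is programmed through a memory-mapped "address" and
 * "data" register pair. The helpers above compute the register
 * location for a given entry index by shifting the index into the
 * PMB_E field and OR-ing in the PMB_ADDR or PMB_DATA array base.
 */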
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;
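
/*
 * Singly-linked list of live PMB entries. The list helpers below do
 * no locking of their own; callers are expected to hold
 * pmb_list_lock, as pmb_alloc() and pmb_free() do.
 */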
static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}
/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	/*
	 * Valid slots are 0 .. NR_PMB_ENTRIES - 1; find_first_zero_bit()
	 * returns NR_PMB_ENTRIES when the map is full.
	 */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}
int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}
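
/*
 * set_pmb_entry() above and clear_pmb_entry() below bracket the raw
 * array-register accesses with jump_to_uncached()/back_to_cached(),
 * since the PMB may only be touched while executing from the uncached
 * P2 alias (see the note above __set_pmb_entry()).
 */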
void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to a
	 * reset by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	clear_bit(entry, &pmb_map);
}
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
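
/*
 * pmb_remap() walks pmb_sizes[] from largest to smallest, carving a
 * request into as few entries as possible. A 192MB (0x0c000000)
 * request, for example, becomes one 128MB entry plus one 64MB entry;
 * any remainder below the minimum 16MB granularity is left unmapped
 * and shows up in the byte count returned to the caller.
 */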
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			err = -EBUSY;
			goto out;
		}

		phys  += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size  -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
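
/*
 * A sketch of a typical caller, with hypothetical addresses: map 64MB
 * of a physical window at 0x18000000 to the unused virtual region at
 * 0xbc000000, cached:
 *
 *	long mapped = pmb_remap(0xbc000000, 0x18000000,
 *				0x04000000, _PAGE_CACHABLE);
 *
 * On success the return value is the number of bytes actually mapped
 * (here 0x04000000); a negative return is an errno from entry
 * allocation or setup. The mapping is later torn down with
 * pmb_unmap(0xbc000000).
 */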
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}
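
/*
 * Tear down a chain of entries built by pmb_remap(): each entry is
 * invalidated in hardware (unless it was never programmed) and then
 * handed back to the slab cache, following the ->link pointers that
 * were set up while mapping.
 */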
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		if (pmbe->entry != PMB_NO_ENTRY)
			clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
static void pmb_cache_ctor(void *pmb)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}
static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important: P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		/* __set_pmb_entry() writes the slot it used back to entry */
		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i  = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
arch_initcall(pmb_init);
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = pmb_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		spin_lock_irq(&pmb_list_lock);
		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
			set_pmb_entry(pmbe);
		spin_unlock_irq(&pmb_list_lock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume  = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif