/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
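
/*
 * Release a slot back to the free pool. The caller is expected to hold
 * pmb_rwlock for writing, since this touches pmb_map.
 */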
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

again:
	for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return PTR_ERR(pmbe);
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys  += pmbe->size;
		vaddr += pmbe->size;
		size  -= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return 0;
}
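
/*
 * Illustration (the region size is picked for the example only): bolting
 * a 208MB region is satisfied greedily from pmb_sizes[] as one 128MB,
 * one 64MB and one 16MB entry, chained together via ->link so that
 * pmb_unmap_entry() can tear the whole mapping down in one pass.
 */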
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (ret != 0)
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)orig_addr);
}
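
/*
 * Worked example (addresses are illustrative only): remapping 16MB at
 * phys 0x10020000 selects the 16MB PMB size, so align_mask is
 * ~(SZ_16M - 1), phys is rounded down to 0x10000000, offset becomes
 * 0x20000, and the caller gets back the vm area address plus that
 * 0x20000 offset.
 */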
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current
			 * one to see if the entries span a contiguous
			 * mapping. If so, setup the entry links
			 * accordingly. Compound mappings are later
			 * coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
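
/*
 * For example, four contiguous 128MB entries with identical flags can be
 * collapsed into a single 512MB entry: the head is resized and rewritten,
 * and the trailing entries covered by the new size are unmapped.
 */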
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
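
/*
 * Passing "pmb=iomap" on the kernel command line sets
 * pmb_iomapping_enabled, which allows pmb_remap_caller() above to
 * service large (>= 16MB) mappings.
 */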
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}
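
/*
 * The CPU is in legacy 29-bit physical addressing mode whenever the
 * PASCR.SE address-extension bit is clear.
 */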
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif