/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

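/*
 * Software descriptor for a single PMB slot. pmb_entry_list[] below
 * shadows the hardware entries so that mappings can be linked,
 * coalesced and torn down without re-reading the PMB registers.
 */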
struct pmb_entry;

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

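/*
 * The PMB is driven through a memory-mapped array of address/data
 * register pairs; these helpers compute the register addresses for a
 * given entry index (e.g. entry n maps to PMB_ADDR | (n << PMB_E_SHIFT)
 * and PMB_DATA | (n << PMB_E_SHIFT)).
 */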
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}

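/*
 * Two entries are merge candidates when the second is both virtually
 * and physically contiguous with the first and carries identical flags.
 */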
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}

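/*
 * Check whether the requested virtual/physical range is already covered
 * by an existing mapping, following entry links for compound mappings.
 */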
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
                               unsigned long size)
{
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe, *iter;
                unsigned long span;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * See if VPN and PPN are bounded by an existing mapping.
                 */
                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
                        continue;
                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
                        continue;

                /*
                 * Now see if we're in range of a simple mapping.
                 */
                if (size <= pmbe->size) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }

                span = pmbe->size;

                /*
                 * Finally for sizes that involve compound mappings, walk
                 * the chain.
                 */
                for (iter = pmbe->link; iter; iter = iter->link)
                        span += iter->size;

                /*
                 * Nothing else to do if the range requirements are met.
                 */
                if (size <= span) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }
        }

        read_unlock(&pmb_rwlock);
        return false;
}

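/*
 * Basic sanity helpers: only the fixed PMB page sizes can be mapped,
 * the virtual range must lie within P1/P2 space, and user-accessible
 * protections are rejected outright.
 */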
static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

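/*
 * Entry allocation. pmb_map tracks which of the NR_PMB_ENTRIES slots
 * are in use; pmb_alloc() either claims the requested slot or searches
 * for a free one, then initializes the software descriptor.
 */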
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        spin_lock_init(&pmbe->lock);

        pmbe->vpn = vpn;
        pmbe->ppn = ppn;
        pmbe->flags = flags;
        pmbe->entry = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry = PMB_NO_ENTRY;
        pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

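/*
 * Establish a bolted (permanent) mapping for the given range. The range
 * is carved up greedily using the largest page size that still fits,
 * and the resulting entries are linked so the whole mapping can be
 * unwound in one go later on.
 */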
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys  += pmbe->size;
                        vaddr += pmbe->size;
                        size  -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                spin_lock(&pmbp->lock);
                                pmbp->link = pmbe;
                                spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}

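/*
 * Set up a PMB mapping for a large I/O range. This path is only taken
 * when PMB iomapping has been enabled via the "pmb=iomap" boot
 * parameter; anything under 16MB is left to the regular TLB path.
 */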
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        /*
         * XXX: This should really start from uncached_end, but this
         * causes the MMU to reset, so for now we restrict it to the
         * 0xb000...0xc000 range.
         */
        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)vaddr);
}

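/*
 * Tear-down. pmb_unmap() looks up the entry whose VPN matches the given
 * virtual address and releases it, along with any linked entries that
 * form a compound mapping.
 */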
int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        spin_lock(&pmbp->lock);

                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * setup the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;

                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

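/*
 * Walk a chain of linked entries and, where the combined span matches
 * one of the supported page sizes, collapse it into a single larger
 * entry and release the now-redundant slots.
 */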
static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

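/*
 * The boot loader may have established an uncached mapping larger than
 * the kernel needs; shrink it down to 16MB and update the uncached
 * region bookkeeping to match.
 */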
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        local_flush_tlb_all();
        ctrl_barrier();
}

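/* Legacy 29-bit mode is in effect whenever the SE bit in PASCR is clear. */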
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

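/*
 * debugfs interface: dump the raw hardware PMB state, one line per entry.
 */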
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
                         (size == PMB_SZ_64M)  ? " 64MB" :
                         (size == PMB_SZ_128M) ? "128MB" :
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = pmb_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
subsys_initcall(pmb_debugfs_init);

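/*
 * The PMB contents are not preserved across hibernation, so the suspend
 * hook (also used as the resume path) rewrites every tracked entry back
 * into hardware when coming back up.
 */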
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;

                read_lock(&pmb_rwlock);

                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }

                read_unlock(&pmb_rwlock);
        }

        prev_state = state;

        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend = pmb_sysdev_suspend,
        .resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif