@@ -21,47 +21,67 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>

-#define NR_PMB_ENTRIES 16
+struct pmb_entry;

-static void __pmb_unmap(struct pmb_entry *);
+struct pmb_entry {
+        unsigned long vpn;
+        unsigned long ppn;
+        unsigned long flags;
+        unsigned long size;
+
+        spinlock_t lock;
+
+        /*
+         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
+         * PMB_NO_ENTRY to search for a free one
+         */
+        int entry;
+
+        /* Adjacent entry link for contiguous multi-entry mappings */
+        struct pmb_entry *link;
+};
+
+static void pmb_unmap_entry(struct pmb_entry *, int depth);

+static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
-static unsigned long pmb_map;
+static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

-static inline unsigned long mk_pmb_entry(unsigned int entry)
+static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
 {
         return (entry & PMB_E_MASK) << PMB_E_SHIFT;
 }

-static inline unsigned long mk_pmb_addr(unsigned int entry)
+static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
 {
         return mk_pmb_entry(entry) | PMB_ADDR;
 }

-static inline unsigned long mk_pmb_data(unsigned int entry)
+static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 {
         return mk_pmb_entry(entry) | PMB_DATA;
 }

 static int pmb_alloc_entry(void)
 {
-        unsigned int pos;
-
-repeat:
-        pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-        if (unlikely(pos > NR_PMB_ENTRIES))
-                return -ENOSPC;
+        int pos;

-        if (test_and_set_bit(pos, &pmb_map))
-                goto repeat;
+        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
+        if (pos >= 0 && pos < NR_PMB_ENTRIES)
+                __set_bit(pos, pmb_map);
+        else
+                pos = -ENOSPC;

         return pos;
 }
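
Two things stand out in the rewritten allocator: the old retry loop with its off-by-one bounds check (pos > NR_PMB_ENTRIES) is gone, and the atomic test_and_set_bit() becomes a plain __set_bit(). The non-atomic bitmap update is only safe because callers are now expected to serialize through pmb_rwlock, as pmb_alloc() does in the next hunk:

        write_lock_irqsave(&pmb_rwlock, irqflags);
        pos = pmb_alloc_entry();        /* bitmap update protected by the write lock */
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
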
@@ -70,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                    unsigned long flags, int entry)
 {
         struct pmb_entry *pmbe;
+        unsigned long irqflags;
+        void *ret = NULL;
         int pos;

+        write_lock_irqsave(&pmb_rwlock, irqflags);
+
         if (entry == PMB_NO_ENTRY) {
                 pos = pmb_alloc_entry();
-                if (pos < 0)
-                        return ERR_PTR(pos);
+                if (unlikely(pos < 0)) {
+                        ret = ERR_PTR(pos);
+                        goto out;
+                }
         } else {
-                if (test_bit(entry, &pmb_map))
-                        return ERR_PTR(-ENOSPC);
+                if (__test_and_set_bit(entry, pmb_map)) {
+                        ret = ERR_PTR(-ENOSPC);
+                        goto out;
+                }
+
                 pos = entry;
         }

+        write_unlock_irqrestore(&pmb_rwlock, irqflags);
+
         pmbe = &pmb_entry_list[pos];
-        if (!pmbe)
-                return ERR_PTR(-ENOMEM);
+
+        memset(pmbe, 0, sizeof(struct pmb_entry));
+
+        spin_lock_init(&pmbe->lock);

         pmbe->vpn = vpn;
         pmbe->ppn = ppn;
@@ -92,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
         pmbe->entry = pos;

         return pmbe;
+
+out:
+        write_unlock_irqrestore(&pmb_rwlock, irqflags);
+        return ret;
 }

 static void pmb_free(struct pmb_entry *pmbe)
 {
-        int pos = pmbe->entry;
-
-        pmbe->vpn   = 0;
-        pmbe->ppn   = 0;
-        pmbe->flags = 0;
-        pmbe->entry = 0;
+        __clear_bit(pmbe->entry, pmb_map);

-        clear_bit(pos, &pmb_map);
+        pmbe->entry = PMB_NO_ENTRY;
+        pmbe->link  = NULL;
 }

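Note the split in locking responsibilities that pmb_alloc() establishes: pmb_rwlock guards the pmb_map bitmap and the entry list as a whole, while each entry's new spinlock guards that entry's fields and its hardware slot. Once the bitmap slot is reserved, the write lock can be dropped, since no other caller can be handed the same pmb_entry; the memset() on allocation also means pmb_free() only needs to reset the bookkeeping fields (entry and link) rather than zeroing every field by hand.
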
 /*
- * Must be in P2 for __set_pmb_entry()
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
  */
-static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-                            unsigned long flags, int pos)
+static __always_inline unsigned long pmb_cache_flags(void)
 {
-        __raw_writel(vpn | PMB_V, mk_pmb_addr(pos));
+        unsigned long flags = 0;

-#ifdef CONFIG_CACHE_WRITETHROUGH
-        /*
-         * When we are in 32-bit address extended mode, CCR.CB becomes
-         * invalid, so care must be taken to manually adjust cacheable
-         * translations.
-         */
-        if (likely(flags & PMB_C))
-                flags |= PMB_WT;
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+        flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+        flags |= PMB_C;
 #endif

-        __raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
+        return flags;
 }

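pmb_cache_flags() hoists the old CONFIG_CACHE_WRITETHROUGH special case out of __set_pmb_entry() and makes it the single source of truth for the cache attribute bits: PMB_C | PMB_WT | PMB_UB for write-through configurations, PMB_C alone for write-back, and 0 when caching is disabled. pmb_synchronize() later applies it to mappings inherited from the boot loader in exactly this way:

        data_val &= ~PMB_CACHE_MASK;
        data_val |= pmb_cache_flags();
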
-static void set_pmb_entry(struct pmb_entry *pmbe)
+/*
+ * Must be run uncached.
+ */
+static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-        jump_to_uncached();
-        __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
-        back_to_cached();
+        writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
+        writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
+                        mk_pmb_data(pmbe->entry));
 }

-static void clear_pmb_entry(struct pmb_entry *pmbe)
+static void __clear_pmb_entry(struct pmb_entry *pmbe)
 {
-        unsigned int entry = pmbe->entry;
-        unsigned long addr;
+        unsigned long addr, data;
+        unsigned long addr_val, data_val;

-        if (unlikely(entry >= NR_PMB_ENTRIES))
-                return;
+        addr = mk_pmb_addr(pmbe->entry);
+        data = mk_pmb_data(pmbe->entry);

-        jump_to_uncached();
+        addr_val = __raw_readl(addr);
+        data_val = __raw_readl(data);

         /* Clear V-bit */
-        addr = mk_pmb_addr(entry);
-        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);
+        writel_uncached(addr_val & ~PMB_V, addr);
+        writel_uncached(data_val & ~PMB_V, data);
+}

-        addr = mk_pmb_data(entry);
-        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);
+static void set_pmb_entry(struct pmb_entry *pmbe)
+{
+        unsigned long flags;

-        back_to_cached();
+        spin_lock_irqsave(&pmbe->lock, flags);
+        __set_pmb_entry(pmbe);
+        spin_unlock_irqrestore(&pmbe->lock, flags);
 }

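With writel_uncached() performing the uncached access directly, the jump_to_uncached()/back_to_cached() trampolines disappear from the register update paths. __set_pmb_entry() and __clear_pmb_entry() become plain, lock-free write sequences, and serialization moves up into the thin set_pmb_entry() wrapper, which simply brackets __set_pmb_entry() with the per-entry spinlock.
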
-
 static struct {
         unsigned long size;
         int flag;
 } pmb_sizes[] = {
-        { .size = 0x20000000, .flag = PMB_SZ_512M, },
-        { .size = 0x08000000, .flag = PMB_SZ_128M, },
-        { .size = 0x04000000, .flag = PMB_SZ_64M,  },
-        { .size = 0x01000000, .flag = PMB_SZ_16M,  },
+        { .size = SZ_512M, .flag = PMB_SZ_512M, },
+        { .size = SZ_128M, .flag = PMB_SZ_128M, },
+        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
+        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
 };

 long pmb_remap(unsigned long vaddr, unsigned long phys,
-               unsigned long size, unsigned long flags)
+               unsigned long size, pgprot_t prot)
 {
         struct pmb_entry *pmbp, *pmbe;
         unsigned long wanted;
         int pmb_flags, i;
         long err;
+        u64 flags;
+
+        flags = pgprot_val(prot);
+
+        pmb_flags = PMB_WT | PMB_UB;

         /* Convert typical pgprot value to the PMB equivalent */
         if (flags & _PAGE_CACHABLE) {
-                if (flags & _PAGE_WT)
-                        pmb_flags = PMB_WT;
-                else
-                        pmb_flags = PMB_C;
-        } else
-                pmb_flags = PMB_WT | PMB_UB;
+                pmb_flags |= PMB_C;
+
+                if ((flags & _PAGE_WT) == 0)
+                        pmb_flags &= ~(PMB_WT | PMB_UB);
+        }

         pmbp = NULL;
         wanted = size;

 again:
         for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+                unsigned long flags;
+
                 if (size < pmb_sizes[i].size)
                         continue;

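pmb_remap() now takes a pgprot_t and converts it, starting from the uncached PMB_WT | PMB_UB default and relaxing it for cacheable protections. A hypothetical call for illustration (the names here are assumptions, not part of this patch):

        /* map 64MB of device memory uncached at a fixed virtual address */
        long ret = pmb_remap(vaddr, phys, SZ_64M, PAGE_KERNEL_NOCACHE);

A pgprot without _PAGE_CACHABLE keeps PMB_WT | PMB_UB; a cacheable write-back pgprot ends up as PMB_C alone; a cacheable write-through one keeps PMB_C | PMB_WT | PMB_UB.
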
@@ -197,18 +242,25 @@ again:
                         goto out;
                 }

-                set_pmb_entry(pmbe);
+                spin_lock_irqsave(&pmbe->lock, flags);
+
+                __set_pmb_entry(pmbe);

                 phys  += pmb_sizes[i].size;
                 vaddr += pmb_sizes[i].size;
                 size  -= pmb_sizes[i].size;

+                pmbe->size = pmb_sizes[i].size;
+
                 /*
                  * Link adjacent entries that span multiple PMB entries
                  * for easier tear-down.
                  */
-                if (likely(pmbp))
+                if (likely(pmbp)) {
+                        spin_lock(&pmbp->lock);
                         pmbp->link = pmbe;
+                        spin_unlock(&pmbp->lock);
+                }

                 pmbp = pmbe;

@@ -218,16 +270,17 @@
                  * pmb_sizes[i].size again.
                  */
                 i--;
+
+                spin_unlock_irqrestore(&pmbe->lock, flags);
         }

-        if (size >= 0x1000000)
+        if (size >= SZ_16M)
                 goto again;

         return wanted - size;

 out:
-        if (pmbp)
-                __pmb_unmap(pmbp);
+        pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

         return err;
 }
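
On the error path the explicit NULL check disappears: pmb_unmap_entry(), defined below, bails out on a NULL entry itself, and a depth of NR_PMB_ENTRIES simply means "tear down as many links as the partially built chain has".
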
@@ -237,24 +290,52 @@ void pmb_unmap(unsigned long addr)
         struct pmb_entry *pmbe = NULL;
         int i;

+        read_lock(&pmb_rwlock);
+
         for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-                if (test_bit(i, &pmb_map)) {
+                if (test_bit(i, pmb_map)) {
                         pmbe = &pmb_entry_list[i];
                         if (pmbe->vpn == addr)
                                 break;
                 }
         }

-        if (unlikely(!pmbe))
-                return;
+        read_unlock(&pmb_rwlock);
+
+        pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+}
+
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+        return (b->vpn == (a->vpn + a->size)) &&
+               (b->ppn == (a->ppn + a->size)) &&
+               (b->flags == a->flags);
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return true;

-        __pmb_unmap(pmbe);
+        return false;
 }

-static void __pmb_unmap(struct pmb_entry *pmbe)
+static int pmb_size_to_flags(unsigned long size)
 {
-        BUG_ON(!test_bit(pmbe->entry, &pmb_map));
+        int i;

+        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+                if (pmb_sizes[i].size == size)
+                        return pmb_sizes[i].flag;
+
+        return 0;
+}
+
+static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
         do {
                 struct pmb_entry *pmblink = pmbe;

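Both helpers are driven by the same pmb_sizes[] table, so a span is only considered valid if the hardware can express it as a single PMB page. For example:

        pmb_size_valid(SZ_64M);         /* true: matches a pmb_sizes[] entry */
        pmb_size_valid(SZ_32M);         /* false: there is no 32MB PMB page */
        pmb_size_to_flags(SZ_128M);     /* PMB_SZ_128M */
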
@@ -265,52 +346,91 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
                  * this entry in pmb_alloc() (even if we haven't filled
                  * it yet).
                  *
-                 * Therefore, calling clear_pmb_entry() is safe as no
+                 * Therefore, calling __clear_pmb_entry() is safe as no
                  * other mapping can be using that slot.
                  */
-                clear_pmb_entry(pmbe);
+                __clear_pmb_entry(pmbe);

                 pmbe = pmblink->link;

                 pmb_free(pmblink);
-        } while (pmbe);
+        } while (pmbe && --depth);
+}
+
+static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
+{
+        unsigned long flags;
+
+        if (unlikely(!pmbe))
+                return;
+
+        write_lock_irqsave(&pmb_rwlock, flags);
+        __pmb_unmap_entry(pmbe, depth);
+        write_unlock_irqrestore(&pmb_rwlock, flags);
 }

-#ifdef CONFIG_PMB_LEGACY
-static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 {
-        return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
+        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }

-static int pmb_apply_legacy_mappings(void)
+static void __init pmb_notify(void)
 {
-        unsigned int applied = 0;
         int i;

-        pr_info("PMB: Preserving legacy mappings:\n");
+        pr_info("PMB: boot mappings:\n");
+
+        read_lock(&pmb_rwlock);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
+                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
+                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
+        }
+
+        read_unlock(&pmb_rwlock);
+}
+
+/*
+ * Sync our software copy of the PMB mappings with those in hardware. The
+ * mappings in the hardware PMB were either set up by the bootloader or
+ * very early on by the kernel.
+ */
+static void __init pmb_synchronize(void)
+{
+        struct pmb_entry *pmbp = NULL;
+        int i, j;

         /*
-         * The following entries are setup by the bootloader.
+         * Run through the initial boot mappings, log the established
+         * ones, and blow away anything that falls outside of the valid
+         * PPN range. Specifically, we only care about existing mappings
+         * that impact the cached/uncached sections.
          *
-         * Entry       VPN         PPN        V   SZ      C       UB
-         * --------------------------------------------------------
-         *   0      0xA0000000 0x00000000   1   64MB    0       0
-         *   1      0xA4000000 0x04000000   1   16MB    0       0
-         *   2      0xA6000000 0x08000000   1   16MB    0       0
-         *   9      0x88000000 0x48000000   1  128MB    1       1
-         *  10      0x90000000 0x50000000   1  128MB    1       1
-         *  11      0x98000000 0x58000000   1  128MB    1       1
-         *  13      0xA8000000 0x48000000   1  128MB    0       0
-         *  14      0xB0000000 0x50000000   1  128MB    0       0
-         *  15      0xB8000000 0x58000000   1  128MB    0       0
+         * Note that touching these can be a bit of a minefield; the boot
+         * loader can establish multi-page mappings with the same caching
+         * attributes, so we need to ensure that we aren't modifying a
+         * mapping that we're presently executing from, or may execute
+         * from in the case of straddling page boundaries.
          *
-         * The only entries the we need are the ones that map the kernel
-         * at the cached and uncached addresses.
+         * In the future we will have to tidy up after the boot loader by
+         * jumping between the cached and uncached mappings and tearing
+         * down alternating mappings while executing from the other.
          */
-        for (i = 0; i < PMB_ENTRY_MAX; i++) {
+        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                 unsigned long addr, data;
                 unsigned long addr_val, data_val;
-                unsigned long ppn, vpn;
+                unsigned long ppn, vpn, flags;
+                unsigned long irqflags;
+                unsigned int size;
+                struct pmb_entry *pmbe;

                 addr = mk_pmb_addr(i);
                 data = mk_pmb_data(i);
@@ -330,110 +450,202 @@ static int pmb_apply_legacy_mappings(void)
                 /*
                  * Only preserve in-range mappings.
                  */
-                if (pmb_ppn_in_range(ppn)) {
-                        unsigned int size;
-                        char *sz_str = NULL;
+                if (!pmb_ppn_in_range(ppn)) {
+                        /*
+                         * Invalidate anything out of bounds.
+                         */
+                        writel_uncached(addr_val & ~PMB_V, addr);
+                        writel_uncached(data_val & ~PMB_V, data);
+                        continue;
+                }
+
+                /*
+                 * Update the caching attributes if necessary
+                 */
+                if (data_val & PMB_C) {
+                        data_val &= ~PMB_CACHE_MASK;
+                        data_val |= pmb_cache_flags();
+
+                        writel_uncached(data_val, data);
+                }
+
+                size = data_val & PMB_SZ_MASK;
+                flags = size | (data_val & PMB_CACHE_MASK);

-                        size = data_val & PMB_SZ_MASK;
+                pmbe = pmb_alloc(vpn, ppn, flags, i);
+                if (IS_ERR(pmbe)) {
+                        WARN_ON_ONCE(1);
+                        continue;
+                }
+
+                spin_lock_irqsave(&pmbe->lock, irqflags);

-                        sz_str = (size == PMB_SZ_16M)  ? " 16MB":
-                                 (size == PMB_SZ_64M)  ? " 64MB":
-                                 (size == PMB_SZ_128M) ? "128MB":
-                                                         "512MB";
+                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+                        if (pmb_sizes[j].flag == size)
+                                pmbe->size = pmb_sizes[j].size;

-                        pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-                                vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-                                (data_val & PMB_C) ? "" : "un");
+                if (pmbp) {
+                        spin_lock(&pmbp->lock);

-                        applied++;
-                } else {
                         /*
-                         * Invalidate anything out of bounds.
+                         * Compare the previous entry against the current one to
+                         * see if the entries span a contiguous mapping. If so,
+                         * setup the entry links accordingly. Compound mappings
+                         * are later coalesced.
                          */
-                        __raw_writel(addr_val & ~PMB_V, addr);
-                        __raw_writel(data_val & ~PMB_V, data);
+                        if (pmb_can_merge(pmbp, pmbe))
+                                pmbp->link = pmbe;
+
+                        spin_unlock(&pmbp->lock);
                 }
-        }

-        return (applied == 0);
+                pmbp = pmbe;
+
+                spin_unlock_irqrestore(&pmbe->lock, irqflags);
+        }
 }
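
pmb_synchronize() is the replacement for the hard-coded legacy table above: instead of trusting a fixed bootloader layout, it reads each hardware slot, invalidates out-of-range entries, normalizes the cache bits via pmb_cache_flags(), and re-registers each survivor in the software list at its existing index by passing i straight through to pmb_alloc(). Contiguous neighbours with identical flags get linked so that pmb_coalesce() can fold them later.
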
-#else
-static inline int pmb_apply_legacy_mappings(void)
+
+static void __init pmb_merge(struct pmb_entry *head)
 {
-        return 1;
+        unsigned long span, newsize;
+        struct pmb_entry *tail;
+        int i = 1, depth = 0;
+
+        span = newsize = head->size;
+
+        tail = head->link;
+        while (tail) {
+                span += tail->size;
+
+                if (pmb_size_valid(span)) {
+                        newsize = span;
+                        depth = i;
+                }
+
+                /* This is the end of the line.. */
+                if (!tail->link)
+                        break;
+
+                tail = tail->link;
+                i++;
+        }
+
+        /*
+         * The merged page size must be valid.
+         */
+        if (!pmb_size_valid(newsize))
+                return;
+
+        head->flags &= ~PMB_SZ_MASK;
+        head->flags |= pmb_size_to_flags(newsize);
+
+        head->size = newsize;
+
+        __pmb_unmap_entry(head->link, depth);
+        __set_pmb_entry(head);
 }
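
pmb_merge() only folds a chain when the accumulated span lands exactly on a hardware page size. A worked example: a 128MB head followed by three linked 128MB entries reaches span = 512MB at depth 3, so the head is rewritten as a single 512MB entry and __pmb_unmap_entry(head->link, 3) tears down the three now-redundant links. Four 16MB entries fold into one 64MB entry the same way, while 16MB + 64MB (an 80MB span) matches nothing in pmb_sizes[] and is left alone.
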
-#endif

-int pmb_init(void)
+static void __init pmb_coalesce(void)
 {
+        unsigned long flags;
         int i;
-        unsigned long addr, data;
-        unsigned long ret;

-        jump_to_uncached();
+        write_lock_irqsave(&pmb_rwlock, flags);

-        /*
-         * Attempt to apply the legacy boot mappings if configured. If
-         * this is successful then we simply carry on with those and
-         * don't bother establishing additional memory mappings. Dynamic
-         * device mappings through pmb_remap() can still be bolted on
-         * after this.
-         */
-        ret = pmb_apply_legacy_mappings();
-        if (ret == 0) {
-                back_to_cached();
-                return 0;
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+                struct pmb_entry *pmbe;
+
+                if (!test_bit(i, pmb_map))
+                        continue;
+
+                pmbe = &pmb_entry_list[i];
+
+                /*
+                 * We're only interested in compound mappings
+                 */
+                if (!pmbe->link)
+                        continue;
+
+                /*
+                 * Nothing to do if it already uses the largest possible
+                 * page size.
+                 */
+                if (pmbe->size == SZ_512M)
+                        continue;
+
+                pmb_merge(pmbe);
         }

+        write_unlock_irqrestore(&pmb_rwlock, flags);
+}
+
+#ifdef CONFIG_UNCACHED_MAPPING
+static void __init pmb_resize(void)
+{
+        int i;
+
         /*
-         * Sync our software copy of the PMB mappings with those in
-         * hardware. The mappings in the hardware PMB were either set up
-         * by the bootloader or very early on by the kernel.
+         * If the uncached mapping was constructed by the kernel, it will
+         * already be a reasonable size.
          */
-        for (i = 0; i < PMB_ENTRY_MAX; i++) {
+        if (uncached_size == SZ_16M)
+                return;
+
+        read_lock(&pmb_rwlock);
+
+        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                 struct pmb_entry *pmbe;
-                unsigned long vpn, ppn, flags;
+                unsigned long flags;

-                addr = PMB_DATA + (i << PMB_E_SHIFT);
-                data = __raw_readl(addr);
-                if (!(data & PMB_V))
+                if (!test_bit(i, pmb_map))
                         continue;

-                if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-                        data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-                        data &= ~PMB_WT;
-#else
-                        data &= ~(PMB_C | PMB_WT);
-#endif
-                }
-                __raw_writel(data, addr);
+                pmbe = &pmb_entry_list[i];

-                ppn = data & PMB_PFN_MASK;
+                if (pmbe->vpn != uncached_start)
+                        continue;

-                flags = data & (PMB_C | PMB_WT | PMB_UB);
-                flags |= data & PMB_SZ_MASK;
+                /*
+                 * Found it, now resize it.
+                 */
+                spin_lock_irqsave(&pmbe->lock, flags);

-                addr = PMB_ADDR + (i << PMB_E_SHIFT);
-                data = __raw_readl(addr);
+                pmbe->size = SZ_16M;
+                pmbe->flags &= ~PMB_SZ_MASK;
+                pmbe->flags |= pmb_size_to_flags(pmbe->size);

-                vpn = data & PMB_PFN_MASK;
+                uncached_resize(pmbe->size);

-                pmbe = pmb_alloc(vpn, ppn, flags, i);
-                WARN_ON(IS_ERR(pmbe));
+                __set_pmb_entry(pmbe);
+
+                spin_unlock_irqrestore(&pmbe->lock, flags);
         }

-        __raw_writel(0, PMB_IRMCR);
+        read_unlock(&pmb_rwlock);
+}
+#endif

-        /* Flush out the TLB */
-        i = __raw_readl(MMUCR);
-        i |= MMUCR_TI;
-        __raw_writel(i, MMUCR);
+void __init pmb_init(void)
+{
+        /* Synchronize software state */
+        pmb_synchronize();

-        back_to_cached();
+        /* Attempt to combine compound mappings */
+        pmb_coalesce();

-        return 0;
+#ifdef CONFIG_UNCACHED_MAPPING
+        /* Resize initial mappings, if necessary */
+        pmb_resize();
+#endif
+
+        /* Log them */
+        pmb_notify();
+
+        writel_uncached(0, PMB_IRMCR);
+
+        /* Flush out the TLB */
+        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+        ctrl_barrier();
 }

 bool __in_29bit_mode(void)
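
The new pmb_init() replaces the legacy/dynamic split with one fixed sequence: synchronize the software state with the inherited hardware entries, coalesce compound mappings, optionally resize the uncached mapping, then log the result. The trailing MMUCR update is the old three-step TLB flush collapsed into a single read-modify-write, with ctrl_barrier() added after it, presumably to let the control register write settle before execution continues.
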
@@ -513,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
         if (state.event == PM_EVENT_ON &&
             prev_state.event == PM_EVENT_FREEZE) {
                 struct pmb_entry *pmbe;
+
+                read_lock(&pmb_rwlock);
+
                 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-                        if (test_bit(i, &pmb_map)) {
+                        if (test_bit(i, pmb_map)) {
                                 pmbe = &pmb_entry_list[i];
                                 set_pmb_entry(pmbe);
                         }
                 }
+
+                read_unlock(&pmb_rwlock);
         }
+
         prev_state = state;
+
         return 0;
 }