@@ -38,26 +38,6 @@ static void __pmb_unmap(struct pmb_entry *);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static unsigned long pmb_map;
 
-static struct pmb_entry pmb_init_map[] = {
-	/* vpn         ppn         flags (ub/sz/c/wt) */
-
-	/* P1 Section Mappings */
-	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
-	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
-	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },
-
-	/* P2 Section Mappings */
-	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
-	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
-};
-
 static inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -156,13 +136,7 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
 
-	/*
-	 * Don't allow clearing of wired init entries, P1 or P2 access
-	 * without a corresponding mapping in the PMB will lead to reset
-	 * by the TLB.
-	 */
-	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
-		     entry >= NR_PMB_ENTRIES))
+	if (unlikely(entry >= NR_PMB_ENTRIES))
 		return;
 
 	jump_to_uncached();
@@ -300,28 +274,30 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 
 int __uses_jump_to_uncached pmb_init(void)
 {
-	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
-	unsigned int entry, i;
-
-	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+	unsigned int i;
+	long size;
 
 	jump_to_uncached();
 
 	/*
-	 * Ordering is important, P2 must be mapped in the PMB before we
-	 * can set PMB.SE, and P1 must be mapped before we jump back to
-	 * P1 space.
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 *	P1 - provides a cached window onto physical memory
+	 *	P2 - provides an uncached window onto physical memory
 	 */
-	for (entry = 0; entry < nr_entries; entry++) {
-		struct pmb_entry *pmbe = pmb_init_map + entry;
+	size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE,
+			 PMB_WT | PMB_UB);
+	BUG_ON(size != __MEMORY_SIZE);
 
-		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
-	}
+	size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C);
+	BUG_ON(size != __MEMORY_SIZE);
 
 	ctrl_outl(0, PMB_IRMCR);
 
 	/* PMB.SE and UB[7] */
-	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
 
 	/* Flush out the TLB */
 	i = ctrl_inl(MMUCR);
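
For readers following the pmb_init() rework above: pmb_remap() builds each
window by chaining PMB entries of the hardware's fixed section sizes (16MB,
64MB, 128MB and 512MB), taking the largest size that still fits at each step.
The standalone sketch below illustrates only that size-selection idea;
remap_sketch(), its explicit alignment checks, and the printf() reporting are
inventions for this illustration, not the kernel's pmb_remap() implementation.

	#include <stdio.h>

	/* The four section sizes a PMB entry can map, largest first. */
	static const unsigned long pmb_sizes[] = {
		512 << 20, 128 << 20, 64 << 20, 16 << 20,
	};

	/*
	 * Illustrative only: cover [phys, phys + size) with the largest
	 * entries that fit and keep both addresses size-aligned.
	 */
	static long remap_sketch(unsigned long virt, unsigned long phys,
				 unsigned long size)
	{
		const unsigned int nr = sizeof(pmb_sizes) / sizeof(pmb_sizes[0]);
		long mapped = 0;

		while (size >= pmb_sizes[nr - 1]) {
			unsigned int i;

			/* Pick the largest size that fits and stays aligned. */
			for (i = 0; i < nr; i++)
				if (size >= pmb_sizes[i] &&
				    (virt % pmb_sizes[i]) == 0 &&
				    (phys % pmb_sizes[i]) == 0)
					break;
			if (i == nr)
				break;	/* no entry size fits the remainder */

			printf("entry: vpn=0x%08lx ppn=0x%08lx span=%luMB\n",
			       virt, phys, pmb_sizes[i] >> 20);

			virt   += pmb_sizes[i];
			phys   += pmb_sizes[i];
			size   -= pmb_sizes[i];
			mapped += pmb_sizes[i];
		}

		return mapped;
	}

	int main(void)
	{
		/* e.g. 128MB of RAM at 0x08000000, windowed at P1 (0x80000000) */
		long n = remap_sketch(0x80000000UL, 0x08000000UL, 128UL << 20);

		printf("mapped %ldMB of 128MB\n", n >> 20);
		return 0;
	}

For the common case of a section-aligned __MEMORY_START with 64MB or 128MB of
memory, a single PMB entry per window covers the whole request, which is why
the BUG_ON(size != __MEMORY_SIZE) checks in the hunk above are expected to
hold.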