@@ -34,6 +34,7 @@
 #include <asm/mach/pci.h>
 
 #include "mm.h"
+#include "tcm.h"
 
 /*
  * empty_zero_page is a special page that is used for
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+					__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
+	tcm_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 