/*
 * Set up paging and the MMU.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>

extern void tlb_init(void);

/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4, thus the ksegs are set up again. Also clear the TLB and do various
 * other paging stuff.
 */
/*
 * Program the CRISv32 MMU for the running kernel: point the per-CPU pgd
 * at init_mm's page directory, flush the TLB, and (re)configure the
 * kernel segment mappings in both the instruction and data MMUs.
 *
 * NOTE(review): the SUPP_BANK_SEL / SUPP_REG_WR sequences are
 * order-sensitive (a bank must be selected before its registers are
 * written) — do not reorder.
 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		/*
		 * Hand the hardware the address of this CPU's current_pgd
		 * slot so TLB refills pick up the live page directory.
		 * Written to both supplement banks (presumably the I- and
		 * D-side TLB units — confirm against hwregs docs).
		 */
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * Each 256 MB segment (0-f) is either "linear" (direct-mapped via
	 * the kbase registers below) or "page" (translated through page
	 * tables). ARTPEC-3 and ETRAX FS differ in which of segments d-f
	 * are linear; see head.S for the rationale.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)     |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)    |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)     |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)    |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
		       REG_STATE(mmu, rw_mm_cfg, seg_f, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, linear) |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/*
	 * Physical base nibbles for the linear segments configured above;
	 * entries for "page" segments are don't-care (0).
	 * See head.S for differences between ARTPEC-3 and ETRAX FS.
	 */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x5) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* Kernel runs with process/page ID 0. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
  124. void __init paging_init(void)
  125. {
  126. int i;
  127. unsigned long zones_size[MAX_NR_ZONES];
  128. printk("Setting up paging and the MMU.\n");
  129. /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
  130. for(i = 0; i < PTRS_PER_PGD; i++)
  131. swapper_pg_dir[i] = __pgd(0);
  132. cris_mmu_init();
  133. /*
  134. * Initialize the bad page table and bad page to point to a couple of
  135. * allocated pages.
  136. */
  137. empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
  138. memset((void *) empty_zero_page, 0, PAGE_SIZE);
  139. /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
  140. zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
  141. for (i = 1; i < MAX_NR_ZONES; i++)
  142. zones_size[i] = 0;
  143. /*
  144. * Use free_area_init_node instead of free_area_init, because it is
  145. * designed for systems where the DRAM starts at an address
  146. * substantially higher than 0, like us (we start at PAGE_OFFSET). This
  147. * saves space in the mem_map page array.
  148. */
  149. free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
  150. mem_map = contig_page_data.node_mem_map;
  151. }