/* arch/cris/arch-v32/mm/init.c */
  1. /*
  2. * Set up paging and the MMU.
  3. *
  4. * Copyright (C) 2000-2003, Axis Communications AB.
  5. *
  6. * Authors: Bjorn Wesen <bjornw@axis.com>
  7. * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
  8. */
  9. #include <linux/mmzone.h>
  10. #include <linux/init.h>
  11. #include <linux/bootmem.h>
  12. #include <linux/mm.h>
  13. #include <asm/pgtable.h>
  14. #include <asm/page.h>
  15. #include <asm/types.h>
  16. #include <asm/mmu.h>
  17. #include <asm/io.h>
  18. #include <asm/mmu_context.h>
  19. #include <asm/arch/hwregs/asm/mmu_defs_asm.h>
  20. #include <asm/arch/hwregs/supp_reg.h>
  21. extern void tlb_init(void);
  22. /*
  23. * The kernel is already mapped with linear mapping at kseg_c so there's no
  24. * need to map it with a page table. However, head.S also temporarily mapped it
  25. * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
  26. * other paging stuff.
  27. */
/*
 * cris_mmu_init() - program both CRISv32 MMUs (instruction and data)
 * with the kernel's final segment configuration.
 *
 * Builds three register images (segment config, kseg base hi/lo, page id),
 * writes them to the I-MMU and D-MMU support-register banks, then turns on
 * the MMUs and caches via the general-config bank.  No parameters, no
 * return value; called once from paging_init().
 */
void __init
cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		pgd_t **pgd;

		/*
		 * Tell the TLB-refill hardware where this CPU's current_pgd
		 * slot lives.  The same pointer is written to support-register
		 * banks 1 and 2 — presumably the I-MMU and D-MMU banks, as
		 * the BANK_IM/BANK_DM writes below mirror this pattern;
		 * verify against supp_reg.h.
		 */
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * Each seg_X field selects either linear (fixed offset) mapping or
	 * page-table translation for one 512 MB kseg.  Segment A differs
	 * between real hardware and the ETRAXFS simulator build.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)     |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)    |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)     |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)    |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAXFS_SIM
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/*
	 * Physical base (top nibble) for each linearly mapped kseg, 8..f.
	 * The simulator build maps kseg_c to 0x0 and kseg_a to 0xa instead
	 * of the hardware values, matching the seg_a/seg_c choices above.
	 */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#ifndef CONFIG_ETRAXFS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAXFS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* Ksegs 0..7 are page-mapped, so their bases are all zero. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* The kernel runs as process id 0. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
  122. void __init
  123. paging_init(void)
  124. {
  125. int i;
  126. unsigned long zones_size[MAX_NR_ZONES];
  127. printk("Setting up paging and the MMU.\n");
  128. /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
  129. for(i = 0; i < PTRS_PER_PGD; i++)
  130. swapper_pg_dir[i] = __pgd(0);
  131. cris_mmu_init();
  132. /*
  133. * Initialize the bad page table and bad page to point to a couple of
  134. * allocated pages.
  135. */
  136. empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
  137. memset((void *) empty_zero_page, 0, PAGE_SIZE);
  138. /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
  139. zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
  140. for (i = 1; i < MAX_NR_ZONES; i++)
  141. zones_size[i] = 0;
  142. /*
  143. * Use free_area_init_node instead of free_area_init, because it is
  144. * designed for systems where the DRAM starts at an address
  145. * substantially higher than 0, like us (we start at PAGE_OFFSET). This
  146. * saves space in the mem_map page array.
  147. */
  148. free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
  149. mem_map = contig_page_data.node_mem_map;
  150. }