/* 40x_mmu.c */
/*
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>

#include "mmu_decl.h"

extern int __map_without_ltlbs;
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 *
 * All work here is done through writes to 4xx special-purpose registers
 * (mtspr); the sequence of writes below is order-sensitive real-mode
 * setup and must not be reordered.
 */
void __init MMU_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */
	mtspr(SPRN_ZPR, 0x10000000);

	flush_instruction_cache();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */
	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */
	mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
	mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
}
  81. #define LARGE_PAGE_SIZE_16M (1<<24)
  82. #define LARGE_PAGE_SIZE_4M (1<<22)
  83. unsigned long __init mmu_mapin_ram(void)
  84. {
  85. unsigned long v, s, mapped;
  86. phys_addr_t p;
  87. v = KERNELBASE;
  88. p = 0;
  89. s = total_lowmem;
  90. if (__map_without_ltlbs)
  91. return 0;
  92. while (s >= LARGE_PAGE_SIZE_16M) {
  93. pmd_t *pmdp;
  94. unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
  95. pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
  96. pmd_val(*pmdp++) = val;
  97. pmd_val(*pmdp++) = val;
  98. pmd_val(*pmdp++) = val;
  99. pmd_val(*pmdp++) = val;
  100. v += LARGE_PAGE_SIZE_16M;
  101. p += LARGE_PAGE_SIZE_16M;
  102. s -= LARGE_PAGE_SIZE_16M;
  103. }
  104. while (s >= LARGE_PAGE_SIZE_4M) {
  105. pmd_t *pmdp;
  106. unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
  107. pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
  108. pmd_val(*pmdp) = val;
  109. v += LARGE_PAGE_SIZE_4M;
  110. p += LARGE_PAGE_SIZE_4M;
  111. s -= LARGE_PAGE_SIZE_4M;
  112. }
  113. mapped = total_lowmem - s;
  114. /* If the size of RAM is not an exact power of two, we may not
  115. * have covered RAM in its entirety with 16 and 4 MiB
  116. * pages. Consequently, restrict the top end of RAM currently
  117. * allocable so that calls to the LMB to allocate PTEs for "tail"
  118. * coverage with normal-sized pages (or other reasons) do not
  119. * attempt to allocate outside the allowed range.
  120. */
  121. __initial_memory_limit_addr = memstart_addr + mapped;
  122. return mapped;
  123. }