  1. /*
  2. * linux/arch/m68k/motorola.c
  3. *
  4. * Routines specific to the Motorola MMU, originally from:
  5. * linux/arch/m68k/init.c
  6. * which are Copyright (C) 1995 Hamish Macdonald
  7. *
  8. * Moved 8/20/1999 Sam Creasey
  9. */
  10. #include <linux/config.h>
  11. #include <linux/module.h>
  12. #include <linux/signal.h>
  13. #include <linux/sched.h>
  14. #include <linux/mm.h>
  15. #include <linux/swap.h>
  16. #include <linux/kernel.h>
  17. #include <linux/string.h>
  18. #include <linux/types.h>
  19. #include <linux/init.h>
  20. #include <linux/bootmem.h>
  21. #include <asm/setup.h>
  22. #include <asm/uaccess.h>
  23. #include <asm/page.h>
  24. #include <asm/pgalloc.h>
  25. #include <asm/system.h>
  26. #include <asm/machdep.h>
  27. #include <asm/io.h>
  28. #include <asm/dma.h>
  29. #ifdef CONFIG_ATARI
  30. #include <asm/atari_stram.h>
  31. #endif
  32. #undef DEBUG
  33. #ifndef mm_cachebits
  34. /*
  35. * Bits to add to page descriptors for "normal" caching mode.
  36. * For 68020/030 this is 0.
  37. * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
  38. */
  39. unsigned long mm_cachebits;
  40. EXPORT_SYMBOL(mm_cachebits);
  41. #endif
  42. static pte_t * __init kernel_page_table(void)
  43. {
  44. pte_t *ptablep;
  45. ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
  46. clear_page(ptablep);
  47. __flush_page_to_ram(ptablep);
  48. flush_tlb_kernel_page(ptablep);
  49. nocache_page(ptablep);
  50. return ptablep;
  51. }
/* Cursor into the current page of pointer tables (see kernel_ptr_table()). */
static pmd_t *last_pgtable __initdata = NULL;
/* Pointer table covering virtual address 0; set up by map_chunk()'s
 * zero-map path on 020/030. */
pmd_t *zero_pgtable __initdata = NULL;
/*
 * Hand out one pmd pointer table for kernel mappings.
 *
 * Pointer tables are packed several-per-page (PTRS_PER_PMD entries
 * each).  The first call locates the last table head.S installed by
 * scanning kernel_pg_dir for the highest pmd page; subsequent calls
 * bump last_pgtable through the remaining slots of that page and
 * allocate a fresh non-cacheable bootmem page when it is exhausted.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}
		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	/* Advance to the next table slot in the current page. */
	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		/* Crossed a page boundary: start a fresh, uncached page. */
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
/*
 * Map one contiguous chunk of physical memory into the kernel's
 * virtual address space, continuing at the static cursor 'virtaddr'
 * (starts at PAGE_OFFSET and advances across calls).
 *
 * On 020/030, early-termination descriptors are used where alignment
 * permits: a whole root-table slot (32MB) or a whole pointer-table
 * slot (256KB) is mapped with a single descriptor.  On 040/060 the
 * mapping is always built down to individual PTEs.
 *
 * Returns the updated virtual-address cursor (end of the mapping).
 */
static unsigned long __init
map_chunk (unsigned long addr, long size)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	static unsigned long virtaddr = PAGE_OFFSET;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/* Fold the protection/cache bits into the physical address so it
	 * can be written directly into descriptors below. */
	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			/* 32MB-aligned with at least 32MB to go: one
			 * early-termination root descriptor covers it all. */
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				/* 256KB early-termination pointer descriptor. */
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				/* Virtual address 0: build real PTEs for the
				 * first pointer-table slot so that page 0
				 * itself can stay invalid (PTE written as 0,
				 * catching NULL dereferences). */
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;

				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				/* Don't clobber a mapping head.S made. */
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				/* Leave virtual page 0 unmapped. */
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	return virtaddr;
}
  175. /*
  176. * paging_init() continues the virtual memory environment setup which
  177. * was begun by the code in arch/head.S.
  178. */
void __init paging_init(void)
{
	int chunk;
	unsigned long mem_avail = 0;
	/* [0] = DMA-reachable pages, [1] = remaining pages, [2] unused. */
	unsigned long zones_size[3] = { 0, };

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
			kernel_pg_dir, availmem, start_mem, end_mem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		/* mm_cachebits is a variable (not a macro) on this config;
		 * select copyback caching for kernel page descriptors. */
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. It may allocate some memory for page
	 * tables and thus modify availmem.
	 */
	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
		mem_avail = map_chunk (m68k_memory[chunk].addr,
				       m68k_memory[chunk].size);
	}

	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	/* Zone 0 ends at the DMA limit (or at high_memory if all of RAM
	 * is DMA-reachable); zone 1 is whatever lies above it.  Both are
	 * converted from byte addresses to page counts. */
	zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
			 (mach_max_dma_address+1) : (unsigned long)high_memory);
	zones_size[1] = (unsigned long)high_memory - zones_size[0];
	zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] >>= PAGE_SHIFT;
	free_area_init(zones_size);
}
  235. extern char __init_begin, __init_end;
  236. void free_initmem(void)
  237. {
  238. unsigned long addr;
  239. addr = (unsigned long)&__init_begin;
  240. for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
  241. virt_to_page(addr)->flags &= ~(1 << PG_reserved);
  242. set_page_count(virt_to_page(addr), 1);
  243. free_page(addr);
  244. totalram_pages++;
  245. }
  246. }