/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 * (set at runtime in paging_init() below when running on an '040/'060).
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
  41. static pte_t * __init kernel_page_table(void)
  42. {
  43. pte_t *ptablep;
  44. ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
  45. clear_page(ptablep);
  46. __flush_page_to_ram(ptablep);
  47. flush_tlb_kernel_page(ptablep);
  48. nocache_page(ptablep);
  49. return ptablep;
  50. }
/* Most recently handed-out pointer (pmd) table; tables are carved
 * PTRS_PER_PMD at a time out of whole pages. */
static pmd_t *last_pgtable __initdata = NULL;
/* Pointer table used for the zero-page mapping set up in map_chunk(). */
pmd_t *zero_pgtable __initdata = NULL;

/*
 * Hand out the next free pointer (pmd) table, allocating and preparing
 * a fresh non-cached page whenever the current one is exhausted.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	/* Step past the table handed out last time.  If that lands exactly
	 * on a page boundary the current page is used up, so allocate a
	 * new one and prepare it like kernel_page_table() does. */
	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
/*
 * Map one contiguous chunk of physical memory, starting at physical
 * address @addr and @size bytes long, into the kernel virtual address
 * space.  The virtual cursor is kept in the function-static `virtaddr`,
 * which starts at PAGE_OFFSET and advances across successive calls;
 * the value after this chunk is returned.  May allocate pages for
 * page/pointer tables via kernel_page_table()/kernel_ptr_table().
 */
static unsigned long __init
map_chunk (unsigned long addr, long size)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	static unsigned long virtaddr = PAGE_OFFSET;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/* Pre-merge the descriptor control bits into the physical address
	 * so each descriptor store below is a single assignment. */
	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			/* On 020/030 a root-level descriptor can map a whole
			 * 32MB region directly (an "early termination"
			 * descriptor) when the remaining chunk is large
			 * enough and 32MB-aligned. */
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				/* Pointer-level early termination: one pmd
				 * slot maps 256KB directly. */
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				/* Special-case virtual address 0: use a real
				 * pte table so the first pte can be left
				 * invalid (zero) while pages 1..63 are mapped
				 * normally. */
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;

				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			/* 040/060: full three-level walk, one page at a time. */
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				/* Don't clobber an existing mapping (head.S
				 * already mapped the start of memory). */
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				/* Leave virtual page 0 unmapped. */
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	return virtaddr;
}
  174. /*
  175. * paging_init() continues the virtual memory environment setup which
  176. * was begun by the code in arch/head.S.
  177. */
  178. void __init paging_init(void)
  179. {
  180. int chunk;
  181. unsigned long mem_avail = 0;
  182. unsigned long zones_size[MAX_NR_ZONES] = { 0, };
  183. #ifdef DEBUG
  184. {
  185. extern unsigned long availmem;
  186. printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
  187. kernel_pg_dir, availmem, start_mem, end_mem);
  188. }
  189. #endif
  190. /* Fix the cache mode in the page descriptors for the 680[46]0. */
  191. if (CPU_IS_040_OR_060) {
  192. int i;
  193. #ifndef mm_cachebits
  194. mm_cachebits = _PAGE_CACHE040;
  195. #endif
  196. for (i = 0; i < 16; i++)
  197. pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
  198. }
  199. /*
  200. * Map the physical memory available into the kernel virtual
  201. * address space. It may allocate some memory for page
  202. * tables and thus modify availmem.
  203. */
  204. for (chunk = 0; chunk < m68k_num_memory; chunk++) {
  205. mem_avail = map_chunk (m68k_memory[chunk].addr,
  206. m68k_memory[chunk].size);
  207. }
  208. flush_tlb_all();
  209. #ifdef DEBUG
  210. printk ("memory available is %ldKB\n", mem_avail >> 10);
  211. printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
  212. start_mem, end_mem);
  213. #endif
  214. /*
  215. * initialize the bad page table and bad page to point
  216. * to a couple of allocated pages
  217. */
  218. empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
  219. memset(empty_zero_page, 0, PAGE_SIZE);
  220. /*
  221. * Set up SFC/DFC registers
  222. */
  223. set_fs(KERNEL_DS);
  224. #ifdef DEBUG
  225. printk ("before free_area_init\n");
  226. #endif
  227. zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
  228. (mach_max_dma_address+1) : (unsigned long)high_memory);
  229. zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0];
  230. zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
  231. zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
  232. free_area_init(zones_size);
  233. }
  234. extern char __init_begin, __init_end;
  235. void free_initmem(void)
  236. {
  237. unsigned long addr;
  238. addr = (unsigned long)&__init_begin;
  239. for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
  240. virt_to_page(addr)->flags &= ~(1 << PG_reserved);
  241. init_page_count(virt_to_page(addr));
  242. free_page(addr);
  243. totalram_pages++;
  244. }
  245. }