sun3mmu.c (2.6 KB)
/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 */
  9. #include <linux/signal.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/swap.h>
  13. #include <linux/kernel.h>
  14. #include <linux/string.h>
  15. #include <linux/types.h>
  16. #include <linux/init.h>
  17. #include <linux/bootmem.h>
  18. #include <asm/setup.h>
  19. #include <asm/uaccess.h>
  20. #include <asm/page.h>
  21. #include <asm/pgtable.h>
  22. #include <asm/system.h>
  23. #include <asm/machdep.h>
  24. #include <asm/io.h>
/* Defined elsewhere in the sun3 port; takes over memory management for
 * the region past the boot-time tables built in paging_init() below. */
extern void mmu_emu_init (unsigned long bootmem_end);

/* Shared printk format used when a corrupt pmd entry is detected. */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

/* Total number of physical pages; presumably assigned during early
 * machine setup — TODO confirm where it is set. */
extern unsigned long num_pages;
  28. void free_initmem(void)
  29. {
  30. }
/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */

/*
 * paging_init() - build the boot-time kernel page tables for the sun3.
 *
 * Allocates the empty zero page and one contiguous run of page tables
 * from the bootmem allocator, wires them into swapper_pg_dir so that
 * all physical RAM is mapped linearly at PAGE_OFFSET, hands the end of
 * the consumed bootmem region to the sun3 MMU emulation layer, and
 * finally declares the zone sizes to the page allocator.
 */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	/* Page of zeroes shared by all zero-fill mappings. */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));

	/* One pte_t per physical page, rounded up to whole pages.
	 * NOTE(review): "(size + PAGE_SIZE)" rounds an already page-aligned
	 * size up by one extra page; presumably harmless slack, but
	 * bootmem_end below depends on it — confirm before changing. */
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	/* First page boundary past the table array; handed to mmu_emu_init(). */
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		/* The pgd entry stores the table's *physical* address. */
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			/* The last table can extend past the end of RAM;
			 * zero those entries instead of mapping them. */
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	/* Let the sun3 MMU emulation manage everything past our tables. */
	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better... -- Sam */
	/* free_area_init(zones_size); */
	free_area_init_node(0, zones_size,
			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
}