mcfmmu.c

/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
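/*
 * A fault address inside the kernel vmalloc/kmap window is resolved
 * against init_mm rather than the faulting task's mm (see cf_tlb_miss).
 */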
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
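
/*
 * Context (ASID) bookkeeping: context_map is a bitmap of allocated
 * contexts, context_mm maps a context number back to the mm_struct
 * that owns it, next_mmu_context is the round-robin allocation cursor,
 * and nr_free_contexts counts contexts still available.
 */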
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
extern unsigned long num_pages;
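
/* Nothing to release: init-section memory is left in place on ColdFire. */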
void free_initmem(void)
{
}
/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
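
/*
 * Handle a TLB miss: walk the page tables for the faulting address and,
 * when a valid, present mapping exists, load it into the ITLB or DTLB
 * via the MMUTR/MMUDR/MMUOR registers. Returns 0 on success and -1 when
 * the miss cannot be resolved here (no mapping, or a write to a
 * read-only page).
 */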
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}
/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
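
/*
 * Example of the bitmap math above (illustrative only; FIRST_CONTEXT is
 * defined elsewhere): with FIRST_CONTEXT == 1, (1 << 1) - 1 == 0x1, so
 * only bit 0 is set and context 0 stays reserved while contexts
 * 1..LAST_CONTEXT remain allocatable.
 */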
/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP. If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 * -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;

	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
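
/*
 * For reference, a rough sketch of how an allocator could use the state
 * above when assigning a context to a new mm (hypothetical pseudocode;
 * the real allocation path lives outside this file, and details such as
 * skipping reserved contexts are omitted):
 *
 *	if (atomic_dec_if_positive(&nr_free_contexts) < 0)
 *		steal_context();	 reclaim one in round-robin order
 *	ctx = next_mmu_context;
 *	while (test_and_set_bit(ctx, context_map))
 *		ctx = (ctx + 1) % (LAST_CONTEXT + 1);
 *	next_mmu_context = (ctx + 1) % (LAST_CONTEXT + 1);
 *	mm->context = ctx;
 *	context_mm[ctx] = mm;
 */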