/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

/* Debug printouts; expands to nothing unless redefined. */
#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never used by a running mm - it is used as an
 * invalid page_id so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, leaving the TLB in an inconsistent state.
 */
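
/* Illustrative only (hypothetical helper, kept out of the build): all
 * three flush routines below repeat this write sequence after selecting
 * an entry through R_TLB_SELECT. The vpn argument must differ between
 * invalidated entries so that identically-tagged invalid entries do not
 * pile up in one 4-way entry group, which is why callers pass i & 0xf.
 */
#if 0
static inline void invalidate_selected_entry(int vpn)
{
	*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
		     IO_FIELD(R_TLB_HI, vpn, vpn));
	*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
		     IO_STATE(R_TLB_LO, valid, no) |
		     IO_STATE(R_TLB_LO, kernel, no) |
		     IO_STATE(R_TLB_LO, we, no) |
		     IO_FIELD(R_TLB_LO, pfn, 0));
}
#endif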

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write identical TLB entries
	 * into the same 4-way entry group, which could otherwise match a
	 * lookup more than once.
	 */

	local_save_flags(flags);
	local_irq_disable();
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
			      IO_STATE(R_TLB_LO, valid, no ) |
			      IO_STATE(R_TLB_LO, kernel, no ) |
			      IO_STATE(R_TLB_LO, we, no ) |
			      IO_FIELD(R_TLB_LO, pfn, 0 ) );
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O ?
	 */

	local_save_flags(flags);
	local_irq_disable();
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	local_irq_restore(flags);
}
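
/* Sketch of the _PAGE_GLOBAL idea mused about in flush_tlb_mm() above
 * (illustrative only; it assumes the "global" field reads back through
 * *R_TLB_LO the same way it is written, and it costs one extra register
 * read per entry, which is the I/O the comment worries about):
 *
 *	if (IO_EXTRACT(R_TLB_LO, global, *R_TLB_LO))
 *		continue;
 */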

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
	       unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_save_flags(flags);
	local_irq_disable();
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				    addr; /* same addr as before works. */
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	local_irq_restore(flags);
}
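
/* The match condition in flush_tlb_page() mirrors the hardware hit check:
 * same page_id and same vpn. Judging from the write-back above, the vpn
 * field occupies the PAGE_MASK bits of R_TLB_HI, which is why comparing
 * tlb_hi & PAGE_MASK against the page-aligned addr works, and why or-ing
 * in addr directly reconstructs the vpn field.
 */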

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;

	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

	local_save_flags(flags);
	local_irq_disable();
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	local_irq_restore(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
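
/* NO_CONTEXT only marks the mm as having no TLB mapping yet: the real
 * page_id is handed out lazily by get_mmu_context() the first time
 * switch_mm() below runs the mm, so creating an mm_struct costs no TLB
 * work up front, and the flush routines above can bail out early.
 */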

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	/* make sure we have a context */
	get_mmu_context(next);

	/* remember the pgd for the fault handlers
	 * this is similar to the pgd register in some other CPU's.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to
	 * dereference the pgd.
	 */
	per_cpu(current_pgd, smp_processor_id()) = next->pgd;

	/* switch context in the MMU */
	D(printk("switching mmu_context to %d (%p)\n",
		 next->context.page_id, next));
	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
}