tlb.c

/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

/* Debug printout macro; define D(x) as x to enable the printk traces below. */
#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which page_id we have assigned to which mm, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, leaving the TLB in an inconsistent state.
 */
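
/* For orientation only: the context allocator itself lives in the shared
 * CRIS mm code, not in this file. A minimal sketch of the bookkeeping the
 * comment above describes - the helper name alloc_page_id is made up here,
 * and this block is illustrative, not the actual implementation:
 */
#if 0
struct mm_struct *page_id_map[NUM_PAGEID];	/* page_id -> owning mm */

static int alloc_page_id(struct mm_struct *mm)
{
	int i;

	/* the last slot (INVALID_PAGEID) is reserved and never handed out,
	 * so TLB entries tagged with it can never produce a hit
	 */
	for (i = 0; i < NUM_PAGEID - 1; i++) {
		if (page_id_map[i] == NULL) {
			page_id_map[i] = mm;
			return i;
		}
	}
	return -1;	/* all in use; caller must steal and flush a context */
}
#endif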

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write similar TLB entries
	 * in the same 4-way entry group; details in the note below.
	 */
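
	/* Note on i & 0xf: the 64 entries appear to be organized in 4-way
	 * groups, and within one group no two entries should carry the
	 * same (page_id, vpn) tag. Since the indexes within a group are
	 * consecutive, i & 0xf gives each member a distinct vpn. This
	 * reading is inferred from the comment above, not verified against
	 * the ETRAX 100 hardware manual.
	 */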

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		/* select the entry, then invalidate it by tagging it with
		 * the never-matching page_id and clearing all access bits
		 */
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
			      IO_STATE(R_TLB_LO, valid,  no ) |
			      IO_STATE(R_TLB_LO, kernel, no ) |
			      IO_STATE(R_TLB_LO, we,     no ) |
			      IO_FIELD(R_TLB_LO, pfn,    0 ) );
	}
	local_irq_restore(flags);

	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O?
	 */
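
	/* The global-page check mentioned above could look roughly like
	 * this inside the loop (untested sketch; it assumes R_TLB_LO reads
	 * back the currently selected entry, just as R_TLB_HI does):
	 *
	 *	if (IO_EXTRACT(R_TLB_LO, global, *R_TLB_LO))
	 *		continue;	- leave global mappings intact
	 */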

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid,  no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we,     no ) |
				      IO_FIELD(R_TLB_LO, pfn,    0 ) );
		}
	}
	local_irq_restore(flags);
}

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
	       unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page 0x%lx in context %d (%p)\n",
		 addr, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			/* keeping the old vpn bits is fine - the invalid
			 * page_id alone guarantees the entry never hits
			 */
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				    addr;

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid,  no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we,     no ) |
				      IO_FIELD(R_TLB_LO, pfn,    0 ) );
		}
	}
	local_irq_restore(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;

	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

	local_save_flags(flags);
	local_irq_disable();
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	local_irq_restore(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
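
/* No page_id is handed out here; the context stays NO_CONTEXT until the
 * first switch_mm() below, where get_mmu_context() assigns one on demand.
 */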

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	/* make sure we have a context */
	get_mmu_context(next);

	/* remember the pgd for the fault handlers
	 * this is similar to the pgd register in some other CPUs.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to dereference
	 * the pgd.
	 */
	per_cpu(current_pgd, smp_processor_id()) = next->pgd;
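
	/* The fault handlers then start their page-table walk from this
	 * per-cpu copy, along the lines of (illustrative sketch, not the
	 * actual code in fault.c):
	 *
	 *	pgd_t *pgd = per_cpu(current_pgd, smp_processor_id())
	 *		     + pgd_index(address);
	 */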

	/* switch context in the MMU */
	D(printk("switching mmu_context to %d (%p)\n",
		 next->context.page_id, next));

	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
}