/*
 * arch/sh/mm/tlb-sh5.c
 *
 * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
 * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

/**
 * sh64_tlb_init
 *
 * Perform initial setup for the DTLB and ITLB.
 */
int __init sh64_tlb_init(void)
{
        /* Assign some sane DTLB defaults */
        cpu_data->dtlb.entries  = 64;
        cpu_data->dtlb.step     = 0x10;

        cpu_data->dtlb.first    = DTLB_FIXED | cpu_data->dtlb.step;
        cpu_data->dtlb.next     = cpu_data->dtlb.first;
        cpu_data->dtlb.last     = DTLB_FIXED |
                                  ((cpu_data->dtlb.entries - 1) *
                                   cpu_data->dtlb.step);

        /* And again for the ITLB */
        cpu_data->itlb.entries  = 64;
        cpu_data->itlb.step     = 0x10;

        cpu_data->itlb.first    = ITLB_FIXED | cpu_data->itlb.step;
        cpu_data->itlb.next     = cpu_data->itlb.first;
        cpu_data->itlb.last     = ITLB_FIXED |
                                  ((cpu_data->itlb.entries - 1) *
                                   cpu_data->itlb.step);

        return 0;
}
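
/*
 * Illustrative sketch, not part of the original file: walking every
 * wired-allocatable DTLB slot using the fields initialised above. With
 * a step of 0x10 and 64 entries this visits DTLB_FIXED | 0x10 through
 * DTLB_FIXED | 0x3f0; slot zero is skipped, since 'first' starts one
 * step in. 'dump_dtlb_slots' is a hypothetical helper name, and it
 * assumes printk is available via the existing includes.
 */
static inline void dump_dtlb_slots(void)
{
        unsigned long long addr;

        for (addr = cpu_data->dtlb.first;
             addr <= cpu_data->dtlb.last;
             addr += cpu_data->dtlb.step)
                printk(KERN_DEBUG "DTLB slot at config address %llx\n", addr);
}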

/**
 * sh64_next_free_dtlb_entry
 *
 * Find the next available DTLB entry
 */
unsigned long long sh64_next_free_dtlb_entry(void)
{
        return cpu_data->dtlb.next;
}

/**
 * sh64_get_wired_dtlb_entry
 *
 * Allocate a wired (locked-in) entry in the DTLB
 */
unsigned long long sh64_get_wired_dtlb_entry(void)
{
        unsigned long long entry = sh64_next_free_dtlb_entry();

        /* Advance the bottom of the free region past the new wired slot */
        cpu_data->dtlb.first    += cpu_data->dtlb.step;
        cpu_data->dtlb.next     += cpu_data->dtlb.step;

        return entry;
}

/**
 * sh64_put_wired_dtlb_entry
 *
 * @entry:	Address of TLB slot.
 *
 * Free a wired (locked-in) entry in the DTLB.
 *
 * Works like a stack, last one to allocate must be first one to free.
 */
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
        __flush_tlb_slot(entry);

        /*
         * We don't do any particularly useful tracking of wired entries,
         * so this approach works like a stack .. last one to be allocated
         * has to be the first one to be freed.
         *
         * We could potentially load wired entries into a list and work on
         * rebalancing the list periodically (which also entails moving the
         * contents of a TLB entry) .. though I have a feeling that this is
         * more trouble than it's worth.
         */

        /*
         * Entry must be valid .. we don't want any ITLB addresses!
         */
        if (entry <= DTLB_FIXED)
                return -EINVAL;

        /*
         * Next, check if we're within range to be freed. (ie, it must be
         * the entry beneath the first 'free' entry!)
         */
        if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
                return -EINVAL;

        /* If we are, then bring this entry back into the list */
        cpu_data->dtlb.first    -= cpu_data->dtlb.step;
        cpu_data->dtlb.next     = entry;

        return 0;
}
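
/*
 * Illustrative sketch, not part of the original file: the wired-entry
 * API is strictly LIFO, so slots must be released in the reverse order
 * of their allocation. 'wired_lifo_example' is a hypothetical name.
 */
static void wired_lifo_example(void)
{
        unsigned long long a = sh64_get_wired_dtlb_entry();
        unsigned long long b = sh64_get_wired_dtlb_entry();

        /* 'b' was allocated last, so it must be freed first ... */
        sh64_put_wired_dtlb_entry(b);

        /* ... and only then may 'a' be freed. */
        sh64_put_wired_dtlb_entry(a);
}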

/**
 * sh64_setup_tlb_slot
 *
 * @config_addr:	Address of TLB slot.
 * @eaddr:		Virtual address.
 * @asid:		Address Space Identifier.
 * @paddr:		Physical address.
 *
 * Load up a virtual<->physical translation for @eaddr<->@paddr in the
 * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
 */
inline void sh64_setup_tlb_slot(unsigned long long config_addr,
                                unsigned long eaddr,
                                unsigned long asid,
                                unsigned long paddr)
{
        unsigned long long pteh, ptel;

        /* Sign-extend the addresses out to 64 bits (only NEFF == 32
           is handled) */
#if (NEFF == 32)
        pteh = (unsigned long long)(signed long long)(signed long) eaddr;
#else
#error "Can't sign extend more than 32 bits yet"
#endif
        pteh &= PAGE_MASK;
        pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
#if (NEFF == 32)
        ptel = (unsigned long long)(signed long long)(signed long) paddr;
#else
#error "Can't sign extend more than 32 bits yet"
#endif
        ptel &= PAGE_MASK;
        ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);

        asm volatile("putcfg %0, 1, %1\n\t"
                     "putcfg %0, 0, %2\n"
                     : : "r" (config_addr), "r" (ptel), "r" (pteh));
}
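
/*
 * Illustrative sketch, not part of the original file: wiring one page
 * translation into the DTLB and tearing it down again. The eaddr, asid
 * and paddr values are made up for the example.
 */
static void wired_mapping_example(void)
{
        unsigned long long slot = sh64_get_wired_dtlb_entry();

        /* Map virtual 0x84000000 to physical 0x04000000 under ASID 0 */
        sh64_setup_tlb_slot(slot, 0x84000000UL, 0, 0x04000000UL);

        /* ... access the mapping here ... */

        sh64_teardown_tlb_slot(slot);
        sh64_put_wired_dtlb_entry(slot);
}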

/**
 * sh64_teardown_tlb_slot
 *
 * @config_addr:	Address of TLB slot.
 *
 * Teardown any existing mapping in the TLB slot @config_addr.
 */
inline void sh64_teardown_tlb_slot(unsigned long long config_addr)
        __attribute__ ((alias("__flush_tlb_slot")));
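
/*
 * For reference, a sketch of what the aliased __flush_tlb_slot() is
 * believed to do; its real definition lives in the asm headers, so
 * treat the body below as an assumption rather than part of this file.
 */
static inline void __flush_tlb_slot_sketch(unsigned long long slot)
{
        /* r63 always reads as zero on SH-5; writing it to config
           register 0 (PTEH) clears the valid bit and thereby
           invalidates the slot's translation. */
        __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
}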