tlb.c

/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	  Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/tlb.h>

static struct {
        unsigned long mask;     /* mask of supported purge page-sizes */
        unsigned long max_bits; /* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
        .lock =         __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
        .next =         1,
        .max_ctx =      ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots in current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */

struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has setup ia64_ctx.max_ctx based on
 * maximum RID that is supported by boot CPU.
 */
void __init
mmu_context_init (void)
{
        ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
        ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}
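
/*
 * Sizing note (worked example, illustrative values only): the bitmaps hold
 * one bit per context, so each needs (max_ctx+1)/8 bytes.  If max_ctx were
 * 0xffffff (24-bit region IDs), each bootmem allocation above would be
 * (0xffffff + 1) >> 3 = 2 MiB.
 */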

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
        int i, cpu;
        unsigned long flush_bit;

        for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
                flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
                ia64_ctx.bitmap[i] ^= flush_bit;
        }

        /* use offset at 300 to skip daemons */
        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, 300);
        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, ia64_ctx.next);

        /*
         * can't call flush_tlb_all() here because of race condition
         * with O(1) scheduler [EF]
         */
        cpu = get_cpu(); /* prevent preemption/migration */
        for_each_online_cpu(i)
                if (i != cpu)
                        per_cpu(ia64_need_tlb_flush, i) = 1;
        put_cpu();
        local_flush_tlb_all();
}
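
/*
 * Expected call pattern (sketch, not a verbatim copy of the caller): the
 * context allocator takes ia64_ctx.lock and only wraps once the free range
 * of RIDs is exhausted, roughly:
 *
 *	spin_lock_irqsave(&ia64_ctx.lock, flags);
 *	if (ia64_ctx.next >= ia64_ctx.limit)
 *		wrap_mmu_context(mm);
 *	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 */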

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
                       unsigned long end, unsigned long nbits)
{
        static DEFINE_SPINLOCK(ptcg_lock);

        struct mm_struct *active_mm = current->active_mm;

        if (mm != active_mm) {
                /* Restore region IDs for mm */
                if (mm && active_mm) {
                        activate_context(mm);
                } else {
                        flush_tlb_all();
                        return;
                }
        }

        /* HW requires global serialization of ptc.ga. */
        spin_lock(&ptcg_lock);
        {
                do {
                        /*
                         * Flush ALAT entries also.
                         */
                        ia64_ptcga(start, (nbits<<2));
                        ia64_srlz_i();
                        start += (1UL << nbits);
                } while (start < end);
        }
        spin_unlock(&ptcg_lock);

        if (mm != active_mm) {
                activate_context(active_mm);
        }
}
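
/*
 * Note on the (nbits << 2) argument above and in flush_tlb_range() below:
 * the purge instructions take the purge size in the same format as the
 * itir.ps field, i.e. log2 of the size stored in bits 7:2, so a 16 KB
 * purge (nbits = 14) is encoded as 14 << 2 = 56.
 */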

void
local_flush_tlb_all (void)
{
        unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
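
/*
 * Worked example (hypothetical PAL_PTCE_INFO values): with count0 = 2 and
 * count1 = 4, the nested loops above issue 2 * 4 = 8 ptc.e operations,
 * stepping the address by stride1 within the inner loop and by an extra
 * stride0 between outer iterations, which is the traversal PAL prescribes
 * for purging the entire local TLB.
 */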

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
                 unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long size = end - start;
        unsigned long nbits;

#ifndef CONFIG_SMP
        if (mm != current->active_mm) {
                mm->context = 0;
                return;
        }
#endif

        nbits = ia64_fls(size + 0xfff);
        while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
                        (nbits < purge.max_bits))
                ++nbits;
        if (nbits > purge.max_bits)
                nbits = purge.max_bits;
        start &= ~((1UL << nbits) - 1);

        preempt_disable();
#ifdef CONFIG_SMP
        if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
                platform_global_tlb_purge(mm, start, end, nbits);
                preempt_enable();
                return;
        }
#endif

        do {
                ia64_ptcl(start, (nbits<<2));
                start += (1UL << nbits);
        } while (start < end);
        preempt_enable();
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
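
/*
 * Worked example for the nbits computation above (illustrative numbers):
 * for a 20 KB range, size + 0xfff = 0x5fff and ia64_fls() returns 14, so
 * the initial purge size is 16 KB.  If 16 KB purges were not set in
 * purge.mask, the while loop would bump nbits to the next supported size;
 * the do/while loop then walks [start, end) in (1UL << nbits) strides.
 */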

void __devinit
ia64_tlb_init (void)
{
        ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
        unsigned long tr_pgbits;
        long status;
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        int cpu = smp_processor_id();

        if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
                printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
                       "defaulting to architected purge page-sizes.\n", status);
                purge.mask = 0x115557000UL;
        }
        purge.max_bits = ia64_fls(purge.mask);

        ia64_get_ptce(&ptce_info);
        local_cpu_data->ptce_base = ptce_info.base;
        local_cpu_data->ptce_count[0] = ptce_info.count[0];
        local_cpu_data->ptce_count[1] = ptce_info.count[1];
        local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
        local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

        local_flush_tlb_all();  /* nuke left overs from bootstrapping... */
        status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

        if (status) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
                per_cpu(ia64_tr_num, cpu) = 8;
                return;
        }
        per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
        if (per_cpu(ia64_tr_num, cpu) >
                                (vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
                per_cpu(ia64_tr_num, cpu) =
                                vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
        if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
                per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
                printk(KERN_DEBUG "TR register number exceeds "
                       "IA64_TR_ALLOC_MAX! IA64_TR_ALLOC_MAX should be "
                       "extended\n");
        }
}
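
/*
 * For reference: the fallback purge.mask value 0x115557000 used above has
 * bits 12, 13, 14, 16, 18, 20, 22, 24, 26, 28 and 32 set, i.e. it encodes
 * the architected purge page-sizes of 4K, 8K, 16K, 64K, 256K, 1M, 4M, 16M,
 * 64M, 256M and 4G.
 */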

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
        u64 tr_log_size;
        u64 tr_end;
        u64 va_rr = ia64_get_rr(va);
        u64 va_rid = RR_TO_RID(va_rr);
        u64 va_end = va + (1<<log_size) - 1;

        if (va_rid != RR_TO_RID(p->rr))
                return 0;
        tr_log_size = (p->itir & 0xff) >> 2;
        tr_end = p->ifa + (1<<tr_log_size) - 1;

        if (va > tr_end || p->ifa > va_end)
                return 0;
        return 1;
}
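
/*
 * In other words, two ranges in the same region ID overlap unless one ends
 * strictly before the other starts.  Example (hypothetical addresses): an
 * existing 16 MB TR covering 0x40000000-0x40ffffff and a 4 KB request at
 * 0x41000000 do not overlap, while a 4 KB request at 0x40fff000 does.
 */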

/*
 * ia64_insert_tr in virtual mode.  Allocate a TR slot
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entries inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0  : error number
 *		  >= 0 : slot number allocated for TR.
 *
 * Must be called with preemption disabled.
 * (See the usage sketch at the end of this file.)
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
        int i, r;
        unsigned long psr;
        struct ia64_tr_entry *p;
        int cpu = smp_processor_id();

        r = -EINVAL;
        /* Check overlap with existing TR entries */
        if (target_mask & 0x1) {
                p = &__per_cpu_idtrs[cpu][0][0];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped entry "
                                               "inserted for TR register!\n");
                                        goto out;
                                }
                }
        }
        if (target_mask & 0x2) {
                p = &__per_cpu_idtrs[cpu][1][0];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped entry "
                                               "inserted for TR register!\n");
                                        goto out;
                                }
                }
        }

        for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
                switch (target_mask & 0x3) {
                case 1:
                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
                                goto found;
                        continue;
                case 2:
                        if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                                goto found;
                        continue;
                case 3:
                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
                            !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                                goto found;
                        continue;
                default:
                        r = -EINVAL;
                        goto out;
                }
        }
found:
        if (i >= per_cpu(ia64_tr_num, cpu))
                return -EBUSY;

        /* Record TR info for MCA handler use! */
        if (i > per_cpu(ia64_tr_used, cpu))
                per_cpu(ia64_tr_used, cpu) = i;

        psr = ia64_clear_ic();
        if (target_mask & 0x1) {
                ia64_itr(0x1, i, va, pte, log_size);
                ia64_srlz_i();
                p = &__per_cpu_idtrs[cpu][0][i];
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        if (target_mask & 0x2) {
                ia64_itr(0x2, i, va, pte, log_size);
                ia64_srlz_i();
                p = &__per_cpu_idtrs[cpu][1][i];
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        ia64_set_psr(psr);
        r = i;
out:
        return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
        int cpu = smp_processor_id();
        int i;
        struct ia64_tr_entry *p;

        if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
                return;

        if (target_mask & 0x1) {
                p = &__per_cpu_idtrs[cpu][0][slot];
                if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x1, p->ifa, p->itir>>2);
                        ia64_srlz_i();
                }
        }

        if (target_mask & 0x2) {
                p = &__per_cpu_idtrs[cpu][1][slot];
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x2, p->ifa, p->itir>>2);
                        ia64_srlz_i();
                }
        }

        for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
                if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
                    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                        break;
        }
        per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
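
/*
 * Usage sketch for ia64_itr_entry()/ia64_ptr_entry() (illustrative only;
 * addr is assumed to be a page-aligned kernel address, and the pte built
 * from PAGE_KERNEL is just one possible choice): pin one page into both
 * the instruction and data TRs, then release the slot again.
 *
 *	u64 pte = pte_val(pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL));
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x3, (u64) addr, pte, PAGE_SHIFT);
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x3, slot);
 *	preempt_enable();
 */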