/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
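
/* Index a virtual address into the TSB: drop the page offset and mask
 * down to the table size, which is always a power of two.
 */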
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}
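
/* A TSB entry matches when its tag equals bits 63:22 of the virtual
 * address being looked up.
 */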
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}
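
/* Invalidate the user TSB entries for every address collected in the
 * mmu_gather.  Cheetah+ and hypervisor chips address the TSB physically,
 * so the flush addresses must be physical there; older chips use the
 * kernel virtual mapping of the table.
 */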
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;
	struct tsb *tsb;
	int i;

	spin_lock_irqsave(&mm->context.lock, flags);

	tsb = mm->context.tsb;
	nentries = mm->context.tsb_nentries;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
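
/* Compute the TSB register value, the locked-entry mapping (if any), and
 * the hypervisor TSB descriptor for a table of tsb_bytes bytes that has
 * already been allocated and stored in mm->context.tsb.
 */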
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	};
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		};
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		};
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
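
/* One slab cache per power-of-two TSB size, 8KB through 1MB.  Each cache
 * uses its object size as the alignment, which gives the size-aligned,
 * physically contiguous allocations the hardware requires.
 */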
static kmem_cache_t *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init tsb_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  SLAB_HWCACHE_ALIGN |
						  SLAB_MUST_HWCACHE_ALIGN,
						  NULL, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try and grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
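/* Concretely: with 16-byte TSB entries an 8K table holds 512 translations,
 * so the first grow fires once the address space maps roughly 384 pages,
 * at which point we step up to the 16K table, and so on.
 */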
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb == NULL && new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb != NULL)
			mm->context.tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb;
	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb = new_tsb;
	setup_tsb_params(mm, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
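
/* Set up the MMU context for a brand new address space: no hardware
 * context number yet, and an initial TSB sized from whatever RSS the
 * mm already reports (non-zero only on fork).
 */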
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, get_mm_rss(mm));

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}
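
/* Release the TSB and, if one was ever assigned, give the hardware
 * context number back to the allocator when an address space is torn
 * down.
 */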
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, cache_index;

	cache_index = (mm->context.tsb_reg_val & 0x7UL);
	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}