- /* arch/sparc64/mm/tsb.c
- *
- * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
- */
- #include <linux/kernel.h>
- #include <asm/system.h>
- #include <asm/page.h>
- #include <asm/tlbflush.h>
- #include <asm/tlb.h>
- #include <asm/mmu_context.h>
- #include <asm/pgtable.h>
- #include <asm/tsb.h>
- /* We use an 8K TSB for the whole kernel; this allows us to
- * handle about 4MB of modules and vmalloc mappings without
- * incurring many hash conflicts.
- */
- #define KERNEL_TSB_SIZE_BYTES 8192
- #define KERNEL_TSB_NENTRIES \
- (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))
- extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
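- /* Each struct tsb is a tag/pte pair of 64-bit words, i.e. 16 bytes,
- * so the 8K kernel TSB above holds 8192 / 16 = 512 entries.  With one
- * entry per 8K base page that is enough to cover 512 * 8K = 4MB of
- * virtual address space, which is where the "about 4MB" figure in the
- * comment above comes from.
- */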
- static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
- {
- vaddr >>= PAGE_SHIFT;
- return vaddr & (nentries - 1);
- }
- static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
- {
- if (context == ~0UL)
- return 1;
- return (entry->tag == ((vaddr >> 22) | (context << 48)));
- }
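- /* As tag_compare() shows, a TSB tag packs virtual address bits 63:22
- * into its low bits and the hardware context number starting at bit 48.
- * For example, vaddr 0x400000 in context 5 yields the tag
- * (0x400000UL >> 22) | (5UL << 48).  A context of ~0UL acts as a
- * wildcard that matches every entry; flush_tsb_user() below passes it
- * when the mm has no valid hardware context.
- */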
- /* TSB flushes need only occur on the processor initiating the address
- * space modification, not on each cpu the address space has run on.
- * Only the TLB flush needs that treatment.
- */
- void flush_tsb_kernel_range(unsigned long start, unsigned long end)
- {
- unsigned long v;
- for (v = start; v < end; v += PAGE_SIZE) {
- unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
- struct tsb *ent = &swapper_tsb[hash];
- if (tag_compare(ent, v, 0)) {
- ent->tag = 0UL;
- membar_storeload_storestore();
- }
- }
- }
- void flush_tsb_user(struct mmu_gather *mp)
- {
- struct mm_struct *mm = mp->mm;
- struct tsb *tsb = mm->context.tsb;
- unsigned long ctx = ~0UL;
- unsigned long nentries = mm->context.tsb_nentries;
- int i;
- if (CTX_VALID(mm->context))
- ctx = CTX_HWBITS(mm->context);
- for (i = 0; i < mp->tlb_nr; i++) {
- unsigned long v = mp->vaddrs[i];
- struct tsb *ent;
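- /* The low bit of each batched address presumably carries a flag set
- * by the TLB batching code; clear it so the hash and tag comparison
- * see the real virtual address.
- */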
- v &= ~0x1UL;
- ent = &tsb[tsb_hash(v, nentries)];
- if (tag_compare(ent, v, ctx)) {
- ent->tag = 0UL;
- membar_storeload_storestore();
- }
- }
- }
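- /* Record how this mm's TSB should be mapped and what goes into the TSB
- * register value: tsb_map_vaddr/tsb_map_pte map the TSB at TSBMAP_BASE
- * with a locked (_PAGE_L), privileged, writable pte using the smallest
- * page size that spans it, and tsb_reg_val packs together that virtual
- * base, the TSB's offset within the mapping page, and a 3-bit size
- * code (0x0 is 512 entries, each step doubles, 0x7 is the 64K-entry,
- * 1MB case).  The DCACHE_ALIASING_POSSIBLE adjustment keeps the 8K
- * mapping's virtual colour consistent with the TSB's physical address.
- */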
- static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
- {
- unsigned long tsb_reg, base, tsb_paddr;
- unsigned long page_sz, tte;
- mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
- base = TSBMAP_BASE;
- tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_W);
- tsb_paddr = __pa(mm->context.tsb);
- /* Use the smallest page size that can map the whole TSB
- * in one TLB entry.
- */
- switch (tsb_bytes) {
- case 8192 << 0:
- tsb_reg = 0x0UL;
- #ifdef DCACHE_ALIASING_POSSIBLE
- base += (tsb_paddr & 8192);
- #endif
- tte |= _PAGE_SZ8K;
- page_sz = 8192;
- break;
- case 8192 << 1:
- tsb_reg = 0x1UL;
- tte |= _PAGE_SZ64K;
- page_sz = 64 * 1024;
- break;
- case 8192 << 2:
- tsb_reg = 0x2UL;
- tte |= _PAGE_SZ64K;
- page_sz = 64 * 1024;
- break;
- case 8192 << 3:
- tsb_reg = 0x3UL;
- tte |= _PAGE_SZ64K;
- page_sz = 64 * 1024;
- break;
- case 8192 << 4:
- tsb_reg = 0x4UL;
- tte |= _PAGE_SZ512K;
- page_sz = 512 * 1024;
- break;
- case 8192 << 5:
- tsb_reg = 0x5UL;
- tte |= _PAGE_SZ512K;
- page_sz = 512 * 1024;
- break;
- case 8192 << 6:
- tsb_reg = 0x6UL;
- tte |= _PAGE_SZ512K;
- page_sz = 512 * 1024;
- break;
- case 8192 << 7:
- tsb_reg = 0x7UL;
- tte |= _PAGE_SZ4MB;
- page_sz = 4 * 1024 * 1024;
- break;
- default:
- BUG();
- }
- tsb_reg |= base;
- tsb_reg |= (tsb_paddr & (page_sz - 1UL));
- tte |= (tsb_paddr & ~(page_sz - 1UL));
- mm->context.tsb_reg_val = tsb_reg;
- mm->context.tsb_map_vaddr = base;
- mm->context.tsb_map_pte = tte;
- }
- /* The page tables are locked against modifications while this
- * runs.
- *
- * XXX do some prefetching...
- */
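- /* Entries have to be re-hashed rather than copied one to one because
- * tsb_hash() depends on the table size, so an entry's index changes
- * when the TSB grows.  Only virtual address bits 13..21 need to be
- * recovered from the old index; in a TSB larger than 512 entries the
- * higher index bits duplicate address bits already present in the tag,
- * which is why the index is masked with 511 below.
- */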
- static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
- struct tsb *new_tsb, unsigned long new_size)
- {
- unsigned long old_nentries = old_size / sizeof(struct tsb);
- unsigned long new_nentries = new_size / sizeof(struct tsb);
- unsigned long i;
- for (i = 0; i < old_nentries; i++) {
- register unsigned long tag asm("o4");
- register unsigned long pte asm("o5");
- unsigned long v;
- unsigned int hash;
- __asm__ __volatile__(
- "ldda [%2] %3, %0"
- : "=r" (tag), "=r" (pte)
- : "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
- if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
- continue;
- /* We only put base page size PTEs into the TSB,
- * but that might change in the future. This code
- * would need to be changed if we start putting larger
- * page size PTEs in there.
- */
- WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);
- /* The tag holds bits 22 to 63 of the virtual address
- * and the context. Clear out the context, and shift
- * up to make a virtual address.
- */
- v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;
- /* The implied bits of the tag (bits 13 to 21) are
- * determined by the TSB entry index, so fill that in.
- */
- v |= (i & (512UL - 1UL)) << 13UL;
- hash = tsb_hash(v, new_nentries);
- new_tsb[hash].tag = tag;
- new_tsb[hash].pte = pte;
- }
- }
- /* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * update_mmu_cache() invokes this routine to try to grow the TSB.
- * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
- * will not trigger any longer.
- *
- * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
- * of two. The TSB must be aligned to its size, so e.g. a 512K TSB
- * must be 512K aligned.
- *
- * The idea here is to grow the TSB when the RSS of the process approaches
- * the number of entries that the current TSB can hold at once. Currently,
- * we trigger when the RSS hits 3/4 of the TSB capacity.
- */
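- /* A worked example of the thresholds used below: the smallest TSB is
- * a single 8K page holding 512 entries, so tsb_rss_limit starts at 384
- * (3/4 of 512).  Once the RSS passes that, the TSB is doubled to
- * 16K/1024 entries with a new limit of 768, and so on until the 1MB
- * maximum (65536 entries, further capped by PAGE_SIZE << MAX_ORDER),
- * at which point the limit becomes ~0UL and growth stops.
- */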
- void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
- {
- unsigned long max_tsb_size = 1 * 1024 * 1024;
- unsigned long size, old_size;
- struct page *page;
- struct tsb *old_tsb;
- if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
- max_tsb_size = (PAGE_SIZE << MAX_ORDER);
- for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
- unsigned long n_entries = size / sizeof(struct tsb);
- n_entries = (n_entries * 3) / 4;
- if (n_entries > rss)
- break;
- }
- page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
- if (unlikely(!page))
- return;
- if (size == max_tsb_size)
- mm->context.tsb_rss_limit = ~0UL;
- else
- mm->context.tsb_rss_limit =
- ((size / sizeof(struct tsb)) * 3) / 4;
- old_tsb = mm->context.tsb;
- old_size = mm->context.tsb_nentries * sizeof(struct tsb);
- if (old_tsb)
- copy_tsb(old_tsb, old_size, page_address(page), size);
- mm->context.tsb = page_address(page);
- setup_tsb_params(mm, size);
- /* If old_tsb is NULL, we're being invoked for the first time
- * from init_new_context().
- */
- if (old_tsb) {
- /* Now force all other processors to reload the new
- * TSB state.
- */
- smp_tsb_sync(mm);
- /* Finally reload it on the local cpu. No further
- * references will remain to the old TSB and we can
- * thus free it up.
- */
- tsb_context_switch(mm);
- free_pages((unsigned long) old_tsb, get_order(old_size));
- }
- }
- int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
- {
- unsigned long initial_rss;
- mm->context.sparc64_ctx_val = 0UL;
- /* copy_mm() copies over the parent's mm_struct before calling
- * us, so we need to zero out the TSB pointer or else tsb_grow()
- * will be confused and think there is an older TSB to free up.
- */
- mm->context.tsb = NULL;
- /* If this is a fork, inherit the parent's TSB size. We would
- * grow it to that size on the first page fault anyway.
- */
- initial_rss = mm->context.tsb_nentries;
- if (initial_rss)
- initial_rss -= 1;
- tsb_grow(mm, initial_rss, GFP_KERNEL);
- if (unlikely(!mm->context.tsb))
- return -ENOMEM;
- return 0;
- }
- void destroy_context(struct mm_struct *mm)
- {
- unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
- free_pages((unsigned long) mm->context.tsb, get_order(size));
- /* We can remove these later, but for now it's useful
- * to catch any bogus post-destroy_context() references
- * to the TSB.
- */
- mm->context.tsb = NULL;
- mm->context.tsb_reg_val = 0UL;
- spin_lock(&ctx_alloc_lock);
- if (CTX_VALID(mm->context)) {
- unsigned long nr = CTX_NRBITS(mm->context);
- mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
- }
- spin_unlock(&ctx_alloc_lock);
- }
|