/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>

#define MAX_PHYS_ADDRESS        (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ    (256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES       \
        ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
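
/* A minimal illustrative sketch (an editor's addition, not part of the
 * kernel proper): how a physical address selects a bit in
 * kpte_linear_bitmap, and hence which kern_linear_pte_xor[] entry the
 * linear-mapping TLB miss path would use.  With MAX_PHYS_ADDRESS of
 * 2^42 and 256MB chunks, the bitmap is (2^42 / 2^28) / 8 = 2048 bytes.
 */
static inline int example_kpte_xor_index(unsigned long paddr)
{
        /* One bit per 256MB chunk: bit N covers [N*256MB, (N+1)*256MB). */
        unsigned long chunk = paddr / KPTE_BITMAP_CHUNK_SZ;

        return test_bit(chunk, kpte_linear_bitmap) ? 1 : 0;
}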
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS       32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
        const struct linux_prom64_registers *x = a, *y = b;

        if (x->phys_addr > y->phys_addr)
                return 1;
        if (x->phys_addr < y->phys_addr)
                return -1;
        return 0;
}

static void __init read_obp_memory(const char *property,
                                   struct linux_prom64_registers *regs,
                                   int *num_ents)
{
        int node = prom_finddevice("/memory");
        int prop_size = prom_getproplen(node, property);
        int ents, ret, i;

        ents = prop_size / sizeof(struct linux_prom64_registers);
        if (ents > MAX_BANKS) {
                prom_printf("The machine has more %s property entries than "
                            "this kernel can support (%d).\n",
                            property, MAX_BANKS);
                prom_halt();
        }

        ret = prom_getproperty(node, property, (char *) regs, prop_size);
        if (ret == -1) {
  94. prom_printf("Couldn't get %s property from /memory.\n");
                prom_halt();
        }

        /* Sanitize what we got from the firmware, by page aligning
         * everything.
         */
        for (i = 0; i < ents; i++) {
                unsigned long base, size;

                base = regs[i].phys_addr;
                size = regs[i].reg_size;

                size &= PAGE_MASK;
                if (base & ~PAGE_MASK) {
                        unsigned long new_base = PAGE_ALIGN(base);

                        size -= new_base - base;
                        if ((long) size < 0L)
                                size = 0UL;
                        base = new_base;
                }
                if (size == 0UL) {
                        /* If it is empty, simply get rid of it.
                         * This simplifies the logic of the other
                         * functions that process these arrays.
                         */
                        memmove(&regs[i], &regs[i + 1],
                                (ents - i - 1) * sizeof(regs[0]));
                        i--;
                        ents--;
                        continue;
                }
                regs[i].phys_addr = base;
                regs[i].reg_size = size;
        }

        *num_ents = ents;

        sort(regs, ents, sizeof(struct linux_prom64_registers),
             cmp_p64, NULL);
}
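
/* Example usage (an editor's note; these are exactly the calls made
 * from paging_init() further down):
 *
 *      read_obp_memory("available", &pavail[0], &pavail_ents);
 *      read_obp_memory("reg", &pall[0], &pall_ents);
 */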
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
        BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty         PG_arch_1
#define PG_dcache_cpu_shift     32UL
#define PG_dcache_cpu_mask      \
        ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
        (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
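
/* Sketch of the page->flags layout this implies (an editor's note,
 * widths illustrative): bit PG_dcache_dirty (PG_arch_1) marks the page
 * dirty in some cpu's D-cache, and bits [32, 32 + log2(NR_CPUS)) record
 * which cpu that is, so the owning cpu is recovered with e.g.:
 *
 *      int cpu = dcache_dirty_cpu(page);
 */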
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
        unsigned long mask = this_cpu;
        unsigned long non_cpu_bits;

        non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
        mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

        __asm__ __volatile__("1:\n\t"
                             "ldx [%2], %%g7\n\t"
                             "and %%g7, %1, %%g1\n\t"
                             "or %%g1, %0, %%g1\n\t"
                             "casx [%2], %%g7, %%g1\n\t"
                             "cmp %%g7, %%g1\n\t"
                             "membar #StoreLoad | #StoreStore\n\t"
                             "bne,pn %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
                             : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
        unsigned long mask = (1UL << PG_dcache_dirty);

        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx [%2], %%g7\n\t"
                             "srlx %%g7, %4, %%g1\n\t"
                             "and %%g1, %3, %%g1\n\t"
                             "cmp %%g1, %0\n\t"
                             "bne,pn %%icc, 2f\n\t"
                             " andn %%g7, %1, %%g1\n\t"
                             "casx [%2], %%g7, %%g1\n\t"
                             "cmp %%g7, %%g1\n\t"
                             "membar #StoreLoad | #StoreStore\n\t"
                             "bne,pn %%xcc, 1b\n\t"
                             " nop\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags),
                               "i" (PG_dcache_cpu_mask),
                               "i" (PG_dcache_cpu_shift)
                             : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
        unsigned long tsb_addr = (unsigned long) ent;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                tsb_addr = __pa(tsb_addr);

        __tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        struct mm_struct *mm;
        struct tsb *tsb;
        unsigned long tag, flags;
        unsigned long tsb_index, tsb_hash_shift;

        if (tlb_type != hypervisor) {
                unsigned long pfn = pte_pfn(pte);
                unsigned long pg_flags;
                struct page *page;

                if (pfn_valid(pfn) &&
                    (page = pfn_to_page(pfn), page_mapping(page)) &&
                    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
                                   PG_dcache_cpu_mask);
                        int this_cpu = get_cpu();

                        /* This is just to optimize away some function calls
                         * in the SMP case.
                         */
                        if (cpu == this_cpu)
                                flush_dcache_page_impl(page);
                        else
                                smp_flush_dcache_page_impl(page, cpu);

                        clear_dcache_dirty_cpu(page, cpu);

                        put_cpu();
                }
        }

        mm = vma->vm_mm;

        tsb_index = MM_TSB_BASE;
        tsb_hash_shift = PAGE_SHIFT;

        spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
                if ((tlb_type == hypervisor &&
                     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
                    (tlb_type != hypervisor &&
                     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
                        tsb_index = MM_TSB_HUGE;
                        tsb_hash_shift = HPAGE_SHIFT;
                }
        }
#endif

        tsb = mm->context.tsb_block[tsb_index].tsb;
        tsb += ((address >> tsb_hash_shift) &
                (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
        tag = (address >> 22UL);
        tsb_insert(tsb, tag, pte_val(pte));

        spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        /* Do not bother with the expensive D-cache flush if it
         * is merely the zero page.  The 'bigcore' testcase in GDB
         * causes this case to run millions of times.
         */
        if (page == ZERO_PAGE(0))
                return;

        this_cpu = get_cpu();

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                int dirty = test_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        int dirty_cpu = dcache_dirty_cpu(page);

                        if (dirty_cpu == this_cpu)
                                goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
                set_dcache_dirty(page, this_cpu);
        } else {
                /* We could delay the flush for the !page_mapping
                 * case too.  But that case is for exec env/arg
                 * pages and those are 99% certain to get
                 * faulted into the tlb (and thus flushed) anyways.
                 */
                flush_dcache_page_impl(page);
        }

out:
        put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
        /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;

                /* This code only runs on Spitfire cpus so this is
                 * why we can assume _PAGE_PADDR_4U.
                 */
                for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
                        unsigned long paddr, mask = _PAGE_PADDR_4U;

                        if (kaddr >= PAGE_OFFSET)
                                paddr = kaddr & mask;
                        else {
                                pgd_t *pgdp = pgd_offset_k(kaddr);
                                pud_t *pudp = pud_offset(pgdp, kaddr);
                                pmd_t *pmdp = pmd_offset(pudp, kaddr);
                                pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

                                paddr = pte_val(*ptep) & mask;
                        }
                        __flush_icache_page(paddr);
                }
        }
}

void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                unsigned long i, flags;

                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }

        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n",
               global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
               global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n",
               global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
               global_page_state(NR_SLAB_RECLAIMABLE) +
               global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
               global_page_state(NR_PAGETABLE));
}

void mmu_info(struct seq_file *m)
{
        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
        else if (tlb_type == hypervisor)
                seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
                   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
        unsigned long virt;
        unsigned long size;
        unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
        return (vaddr >= LOW_OBP_ADDRESS &&
                vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
        const struct linux_prom_translation *x = a, *y = b;

        if (x->virt > y->virt)
                return 1;
        if (x->virt < y->virt)
                return -1;
        return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
        int n, node, ents, first, last, i;

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (unlikely(n == 0 || n == -1)) {
                prom_printf("prom_mappings: Couldn't get size.\n");
                prom_halt();
        }
        if (unlikely(n > sizeof(prom_trans))) {
  429. prom_printf("prom_mappings: Size %Zd is too big.\n", n);
                prom_halt();
        }

        if ((n = prom_getproperty(node, "translations",
                                  (char *)&prom_trans[0],
                                  sizeof(prom_trans))) == -1) {
                prom_printf("prom_mappings: Couldn't get property.\n");
                prom_halt();
        }

        n = n / sizeof(struct linux_prom_translation);

        ents = n;

        sort(prom_trans, ents, sizeof(struct linux_prom_translation),
             cmp_ptrans, NULL);

        /* Now kick out all the non-OBP entries. */
        for (i = 0; i < ents; i++) {
                if (in_obp_range(prom_trans[i].virt))
                        break;
        }
        first = i;
        for (; i < ents; i++) {
                if (!in_obp_range(prom_trans[i].virt))
                        break;
        }
        last = i;

        for (i = 0; i < (last - first); i++) {
                struct linux_prom_translation *src = &prom_trans[i + first];
                struct linux_prom_translation *dest = &prom_trans[i];

                *dest = *src;
        }
        for (; i < ents; i++) {
                struct linux_prom_translation *dest = &prom_trans[i];

                dest->virt = dest->size = dest->data = 0x0UL;
        }

        prom_trans_ents = last - first;

        if (tlb_type == spitfire) {
                /* Clear diag TTE bits. */
                for (i = 0; i < prom_trans_ents; i++)
                        prom_trans[i].data &= ~0x0003fe0000000000UL;
        }
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
                                       unsigned long pte,
                                       unsigned long mmu)
{
        unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

        if (ret != 0) {
                prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
  476. "errors with %lx\n", vaddr, 0, pte, mmu, ret);
                prom_halt();
        }
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
        unsigned long phys_page, tte_vaddr, tte_data;
        int i, tlb_ent = sparc64_highest_locked_tlbent();

        tte_vaddr = (unsigned long) KERNBASE;
        phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
        tte_data = kern_large_tte(phys_page);

        kern_locked_tte_data = tte_data;

        /* Now lock us into the TLBs via Hypervisor or OBP. */
        if (tlb_type == hypervisor) {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
        } else {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
                sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
        }
        if (tlb_type == cheetah_plus) {
                sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
                                            CTX_CHEETAH_PLUS_NUC);
                sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
                sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
        }
}

static void __init inherit_prom_mappings(void)
{
        read_obp_translations();

        /* Now fixup OBP's idea about where we really are mapped. */
        printk("Remapping the kernel... ");
        remap_kernel();
        printk("done.\n");
}

void prom_world(int enter)
{
        if (!enter)
                set_fs((mm_segment_t) { get_thread_current_ds() });

        __asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        if (tlb_type == spitfire) {
                int n = 0;

                for (va = start; va < end; va += 32) {
                        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
                        if (++n >= 512)
                                break;
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)
                        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (va),
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
}

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR      (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS  BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
        unsigned long flags;
        int new_version;

        spin_lock_irqsave(&ctx_alloc_lock, flags);
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
        new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;

                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;

                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        new_version = 1;
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
        spin_unlock_irqrestore(&ctx_alloc_lock, flags);

        if (unlikely(new_version))
                smp_new_mmu_context_version();
}
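
/* Illustrative decomposition (an editor's sketch, not used by the
 * kernel): a context value as managed above splits into an allocation
 * generation ("version") in the high bits and a hardware context
 * number in the low CTX_NR_BITS bits.
 */
static inline unsigned long example_ctx_nr(unsigned long ctx)
{
        return ctx & CTX_NR_MASK;       /* hardware context number */
}

static inline unsigned long example_ctx_version(unsigned long ctx)
{
        return ctx & CTX_VERSION_MASK;  /* allocation generation */
}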
/* Find a free area for the bootmem map, avoiding the kernel image
 * and the initial ramdisk.
 */
static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
                                               unsigned long end_pfn)
{
        unsigned long avoid_start, avoid_end, bootmap_size;
        int i;

        bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmap_size <<= PAGE_SHIFT;

        avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
        avoid_start = initrd_start;
        avoid_end = PAGE_ALIGN(initrd_end);
#endif

        for (i = 0; i < pavail_ents; i++) {
                unsigned long start, end;

                start = pavail[i].phys_addr;
                end = start + pavail[i].reg_size;

                while (start < end) {
                        if (start >= kern_base &&
                            start < PAGE_ALIGN(kern_base + kern_size)) {
                                start = PAGE_ALIGN(kern_base + kern_size);
                                continue;
                        }
                        if (start >= avoid_start && start < avoid_end) {
                                start = avoid_end;
                                continue;
                        }

                        if ((end - start) < bootmap_size)
                                break;

                        if (start < kern_base &&
                            (start + bootmap_size) > kern_base) {
                                start = PAGE_ALIGN(kern_base + kern_size);
                                continue;
                        }

                        if (start < avoid_start &&
                            (start + bootmap_size) > avoid_start) {
                                start = avoid_end;
                                continue;
                        }

                        /* OK, it doesn't overlap anything, use it. */
                        return start >> PAGE_SHIFT;
                }
        }

        prom_printf("Cannot find free area for bootmap, aborting.\n");
        prom_halt();
}
static void __init trim_pavail(unsigned long *cur_size_p,
                               unsigned long *end_of_phys_p)
{
        unsigned long to_trim = *cur_size_p - cmdline_memory_size;
        unsigned long avoid_start, avoid_end;
        int i;

        to_trim = PAGE_ALIGN(to_trim);

        avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
        avoid_start = initrd_start;
        avoid_end = PAGE_ALIGN(initrd_end);
#endif

        /* Trim some pavail[] entries in order to satisfy the
         * requested "mem=xxx" kernel command line specification.
         *
         * We must not trim off the kernel image area nor the
         * initial ramdisk range (if any).  Also, we must not trim
         * any pavail[] entry down to zero in order to preserve
         * the invariant that all pavail[] entries have a non-zero
         * size which is assumed by all of the code in here.
         */
        for (i = 0; i < pavail_ents; i++) {
                unsigned long start, end, kern_end;
                unsigned long trim_low, trim_high, n;

                kern_end = PAGE_ALIGN(kern_base + kern_size);

                trim_low = start = pavail[i].phys_addr;
                trim_high = end = start + pavail[i].reg_size;

                if (kern_base >= start &&
                    kern_base < end) {
                        trim_low = kern_base;
                        if (kern_end >= end)
                                continue;
                }
                if (kern_end >= start &&
                    kern_end < end) {
                        trim_high = kern_end;
                }
                if (avoid_start &&
                    avoid_start >= start &&
                    avoid_start < end) {
                        if (trim_low > avoid_start)
                                trim_low = avoid_start;
                        if (avoid_end >= end)
                                continue;
                }
                if (avoid_end &&
                    avoid_end >= start &&
                    avoid_end < end) {
                        if (trim_high < avoid_end)
                                trim_high = avoid_end;
                }

                if (trim_high <= trim_low)
                        continue;

                if (trim_low == start && trim_high == end) {
                        /* Whole chunk is available for trimming.
                         * Trim all except one page, in order to keep
                         * entry non-empty.
                         */
                        n = (end - start) - PAGE_SIZE;
                        if (n > to_trim)
                                n = to_trim;

                        if (n) {
                                pavail[i].phys_addr += n;
                                pavail[i].reg_size -= n;
                                to_trim -= n;
                        }
                } else {
                        n = (trim_low - start);
                        if (n > to_trim)
                                n = to_trim;

                        if (n) {
                                pavail[i].phys_addr += n;
                                pavail[i].reg_size -= n;
                                to_trim -= n;
                        }
                        if (to_trim) {
                                n = end - trim_high;
                                if (n > to_trim)
                                        n = to_trim;

                                if (n) {
                                        pavail[i].reg_size -= n;
                                        to_trim -= n;
                                }
                        }
                }

                if (!to_trim)
                        break;
        }

        /* Recalculate. */
        *cur_size_p = 0UL;
        for (i = 0; i < pavail_ents; i++) {
                *end_of_phys_p = pavail[i].phys_addr +
                        pavail[i].reg_size;
                *cur_size_p += pavail[i].reg_size;
        }
}
/* About pages_avail, this is the value we will use to calculate
 * the zholes_size[] argument given to free_area_init_node().  The
 * page allocator uses this to calculate nr_kernel_pages,
 * nr_all_pages and zone->present_pages.  On NUMA it is used
 * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
 *
 * So this number should really be set to what the page allocator
 * actually ends up with.  This means:
 * 1) It should include bootmem map pages, we'll release those.
 * 2) It should not include the kernel image, except for the
 *    __init sections which we will also release.
 * 3) It should include the initrd image, since we'll release
 *    that too.
 */
static unsigned long __init bootmem_init(unsigned long *pages_avail,
                                         unsigned long phys_base)
{
        unsigned long bootmap_size, end_pfn;
        unsigned long end_of_phys_memory = 0UL;
        unsigned long bootmap_pfn, bytes_avail, size;
        int i;

        bytes_avail = 0UL;
        for (i = 0; i < pavail_ents; i++) {
                end_of_phys_memory = pavail[i].phys_addr +
                        pavail[i].reg_size;
                bytes_avail += pavail[i].reg_size;
        }

        /* Determine the location of the initial ramdisk before trying
         * to honor the "mem=xxx" command line argument.  We must know
         * where the kernel image and the ramdisk image are so that we
         * do not trim those two areas from the physical memory map.
         */

#ifdef CONFIG_BLK_DEV_INITRD
        /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
        if (sparc_ramdisk_image || sparc_ramdisk_image64) {
                unsigned long ramdisk_image = sparc_ramdisk_image ?
                        sparc_ramdisk_image : sparc_ramdisk_image64;
                ramdisk_image -= KERNBASE;
                initrd_start = ramdisk_image + phys_base;
                initrd_end = initrd_start + sparc_ramdisk_size;
                if (initrd_end > end_of_phys_memory) {
                        printk(KERN_CRIT "initrd extends beyond end of memory "
                               "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
                               initrd_end, end_of_phys_memory);
                        initrd_start = 0;
                        initrd_end = 0;
                }
        }
#endif

        if (cmdline_memory_size &&
            bytes_avail > cmdline_memory_size)
                trim_pavail(&bytes_avail,
                            &end_of_phys_memory);

        *pages_avail = bytes_avail >> PAGE_SHIFT;

        end_pfn = end_of_phys_memory >> PAGE_SHIFT;

        /* Initialize the boot-time allocator. */
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);

        bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
                                         min_low_pfn, end_pfn);

        /* Now register the available physical memory with the
         * allocator.
         */
        for (i = 0; i < pavail_ents; i++)
                free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                size = initrd_end - initrd_start;

                /* Reserve the initrd image area. */
                reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);

                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
        }
#endif
        /* Reserve the kernel text/data/bss. */
        reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
        *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

        /* Add back in the initmem pages. */
        size = ((unsigned long)(__init_end) & PAGE_MASK) -
                PAGE_ALIGN((unsigned long)__init_begin);
        *pages_avail += size >> PAGE_SHIFT;

        /* Reserve the bootmem map.  We do not account for it
         * in pages_avail because we will release that memory
         * in free_all_bootmem.
         */
        size = bootmap_size;
        reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);

        for (i = 0; i < pavail_ents; i++) {
                unsigned long start_pfn, end_pfn;

                start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
                end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
                memory_present(0, start_pfn, end_pfn);
        }

        sparse_init();

        return end_pfn;
}
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
                                            unsigned long pend, pgprot_t prot)
{
        unsigned long vstart = PAGE_OFFSET + pstart;
        unsigned long vend = PAGE_OFFSET + pend;
        unsigned long alloc_bytes = 0UL;

        if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
                prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
                            vstart, vend);
                prom_halt();
        }

        while (vstart < vend) {
                unsigned long this_end, paddr = __pa(vstart);
                pgd_t *pgd = pgd_offset_k(vstart);
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pud = pud_offset(pgd, vstart);
                if (pud_none(*pud)) {
                        pmd_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pud_populate(&init_mm, pud, new);
                }

                pmd = pmd_offset(pud, vstart);
                if (!pmd_present(*pmd)) {
                        pte_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pmd_populate_kernel(&init_mm, pmd, new);
                }

                pte = pte_offset_kernel(pmd, vstart);
                this_end = (vstart + PMD_SIZE) & PMD_MASK;
                if (this_end > vend)
                        this_end = vend;

                while (vstart < this_end) {
                        pte_val(*pte) = (paddr | pgprot_val(prot));

                        vstart += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                        pte++;
                }
        }

        return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
        const unsigned long shift_256MB = 28;
        const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
        const unsigned long size_256MB = (1UL << shift_256MB);

        while (start < end) {
                long remains;

                remains = end - start;
                if (remains < size_256MB)
                        break;

                if (start & mask_256MB) {
                        start = (start + size_256MB) & ~mask_256MB;
                        continue;
                }

                while (remains >= size_256MB) {
                        unsigned long index = start >> shift_256MB;

                        __set_bit(index, kpte_linear_bitmap);

                        start += size_256MB;
                        remains -= size_256MB;
                }
        }
}
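
/* Worked example (an editor's note, addresses hypothetical): a bank
 * covering [0x10000000, 0x50000000) starts on a 256MB boundary and
 * spans four full 256MB chunks, so
 *
 *      mark_kpte_bitmap(0x10000000UL, 0x50000000UL);
 *
 * sets bits 1 through 4 of kpte_linear_bitmap, and those chunks will
 * later be mapped with 256MB pages via kern_linear_pte_xor[1].
 */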
static void __init init_kpte_bitmap(void)
{
        unsigned long i;

        for (i = 0; i < pall_ents; i++) {
                unsigned long phys_start, phys_end;

                phys_start = pall[i].phys_addr;
                phys_end = phys_start + pall[i].reg_size;

                mark_kpte_bitmap(phys_start, phys_end);
        }
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        unsigned long i, mem_alloced = 0UL;

        for (i = 0; i < pall_ents; i++) {
                unsigned long phys_start, phys_end;

                phys_start = pall[i].phys_addr;
                phys_end = phys_start + pall[i].reg_size;

                mem_alloced += kernel_map_range(phys_start, phys_end,
                                                PAGE_KERNEL);
        }

        printk("Allocated %ld bytes for kernel page tables.\n",
               mem_alloced);

        kvmap_linear_patch[0] = 0x01000000; /* nop */
        flushi(&kvmap_linear_patch[0]);

        __flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
        unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

        kernel_map_range(phys_start, phys_end,
                         (enable ? PAGE_KERNEL : __pgprot(0)));

        flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
                               PAGE_OFFSET + phys_end);

        /* we should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu.
         */
        __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
                                 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
        int i;

        for (i = 0; i < pavail_ents; i++) {
                if (pavail[i].reg_size >= size)
                        return pavail[i].phys_addr;
        }

        return ~0UL;
}

static void __init tsb_phys_patch(void)
{
        struct tsb_ldquad_phys_patch_entry *pquad;
        struct tsb_phys_patch_entry *p;

        pquad = &__tsb_ldquad_phys_patch;
        while (pquad < &__tsb_ldquad_phys_patch_end) {
                unsigned long addr = pquad->addr;

                if (tlb_type == hypervisor)
                        *(unsigned int *) addr = pquad->sun4v_insn;
                else
                        *(unsigned int *) addr = pquad->sun4u_insn;
                wmb();
                __asm__ __volatile__("flush %0"
                                     : /* no outputs */
                                     : "r" (addr));

                pquad++;
        }

        p = &__tsb_phys_patch;
        while (p < &__tsb_phys_patch_end) {
                unsigned long addr = p->addr;

                *(unsigned int *) addr = p->insn;
                wmb();
                __asm__ __volatile__("flush %0"
                                     : /* no outputs */
                                     : "r" (addr));

                p++;
        }
}

/* Don't mark as init, we give this to the Hypervisor. */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR  2
#else
#define NUM_KTSB_DESCR  1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
        unsigned long ktsb_pa;

        /* First KTSB for PAGE_SIZE mappings. */
        ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

        switch (PAGE_SIZE) {
        case 8 * 1024:
        default:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
                break;

        case 64 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
                break;

        case 512 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
                break;

        case 4 * 1024 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
                break;
        }

        ktsb_descr[0].assoc = 1;
        ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
        ktsb_descr[0].ctx_idx = 0;
        ktsb_descr[0].tsb_base = ktsb_pa;
        ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
        /* Second KTSB for 4MB/256MB mappings. */
        ktsb_pa = (kern_base +
                   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

        ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
        ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
                                   HV_PGSZ_MASK_256MB);
        ktsb_descr[1].assoc = 1;
        ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
        ktsb_descr[1].ctx_idx = 0;
        ktsb_descr[1].tsb_base = ktsb_pa;
        ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
        unsigned long pa, ret;

        pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

        ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
        if (ret != 0) {
                prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
                            "errors with %lx\n", pa, ret);
                prom_halt();
        }
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);
extern void cpu_probe(void);
extern void central_probe(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}

void __init paging_init(void)
{
        unsigned long end_pfn, pages_avail, shift, phys_base;
        unsigned long real_end, i;

        /* These build time checks make sure that the dcache_dirty_cpu()
  1082. * page->flags usage will work.
  1083. *
  1084. * When a page gets marked as dcache-dirty, we store the
  1085. * cpu number starting at bit 32 in the page->flags. Also,
  1086. * functions like clear_dcache_dirty_cpu use the cpu mask
  1087. * in 13-bit signed-immediate instruction fields.
  1088. */
  1089. BUILD_BUG_ON(FLAGS_RESERVED != 32);
  1090. BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
  1091. ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
  1092. BUILD_BUG_ON(NR_CPUS > 4096);
  1093. kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
  1094. kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
  1095. sstate_booting();
  1096. /* Invalidate both kernel TSBs. */
  1097. memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
  1098. #ifndef CONFIG_DEBUG_PAGEALLOC
  1099. memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
  1100. #endif
  1101. if (tlb_type == hypervisor)
  1102. sun4v_pgprot_init();
  1103. else
  1104. sun4u_pgprot_init();
  1105. if (tlb_type == cheetah_plus ||
  1106. tlb_type == hypervisor)
  1107. tsb_phys_patch();
  1108. if (tlb_type == hypervisor) {
  1109. sun4v_patch_tlb_handlers();
  1110. sun4v_ktsb_init();
  1111. }
  1112. /* Find available physical memory... */
  1113. read_obp_memory("available", &pavail[0], &pavail_ents);
  1114. phys_base = 0xffffffffffffffffUL;
  1115. for (i = 0; i < pavail_ents; i++)
  1116. phys_base = min(phys_base, pavail[i].phys_addr);
  1117. set_bit(0, mmu_context_bmap);
  1118. shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
  1119. real_end = (unsigned long)_end;
  1120. num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
  1121. printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
  1122. num_kernel_image_mappings);
  1123. /* Set kernel pgd to upper alias so physical page computations
  1124. * work.
  1125. */
  1126. init_mm.pgd += ((shift) / (sizeof(pgd_t)));
  1127. memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
  1128. /* Now can init the kernel/bad page tables. */
  1129. pud_set(pud_offset(&swapper_pg_dir[0], 0),
  1130. swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
  1131. inherit_prom_mappings();
  1132. read_obp_memory("reg", &pall[0], &pall_ents);
  1133. init_kpte_bitmap();
  1134. /* Ok, we can use our TLB miss and window trap handlers safely. */
  1135. setup_tba();
  1136. __flush_tlb_all();
  1137. if (tlb_type == hypervisor)
  1138. sun4v_ktsb_register();
  1139. /* Setup bootmem... */
  1140. pages_avail = 0;
  1141. last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
  1142. max_mapnr = last_valid_pfn;
  1143. kernel_physical_mapping_init();
  1144. real_setup_per_cpu_areas();
  1145. prom_build_devicetree();
  1146. if (tlb_type == hypervisor)
  1147. sun4v_mdesc_init();
  1148. {
  1149. unsigned long zones_size[MAX_NR_ZONES];
  1150. unsigned long zholes_size[MAX_NR_ZONES];
  1151. int znum;
  1152. for (znum = 0; znum < MAX_NR_ZONES; znum++)
  1153. zones_size[znum] = zholes_size[znum] = 0;
  1154. zones_size[ZONE_NORMAL] = end_pfn;
  1155. zholes_size[ZONE_NORMAL] = end_pfn - pages_avail;
  1156. free_area_init_node(0, &contig_page_data, zones_size,
  1157. __pa(PAGE_OFFSET) >> PAGE_SHIFT,
  1158. zholes_size);
  1159. }
  1160. printk("Booting Linux...\n");
  1161. central_probe();
  1162. cpu_probe();
  1163. }
static void __init taint_real_pages(void)
{
        int i;

        read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

        /* Find changes discovered in the physmem available rescan and
         * reserve the lost portions in the bootmem maps.
         */
        for (i = 0; i < pavail_ents; i++) {
                unsigned long old_start, old_end;

                old_start = pavail[i].phys_addr;
                old_end = old_start +
                        pavail[i].reg_size;
                while (old_start < old_end) {
                        int n;

                        for (n = 0; n < pavail_rescan_ents; n++) {
                                unsigned long new_start, new_end;

                                new_start = pavail_rescan[n].phys_addr;
                                new_end = new_start +
                                        pavail_rescan[n].reg_size;

                                if (new_start <= old_start &&
                                    new_end >= (old_start + PAGE_SIZE)) {
                                        set_bit(old_start >> 22,
                                                sparc64_valid_addr_bitmap);
                                        goto do_next_page;
                                }
                        }
                        reserve_bootmem(old_start, PAGE_SIZE, BOOTMEM_DEFAULT);

                do_next_page:
                        old_start += PAGE_SIZE;
                }
        }
}

int __init page_in_phys_avail(unsigned long paddr)
{
        int i;

        paddr &= PAGE_MASK;

        for (i = 0; i < pavail_rescan_ents; i++) {
                unsigned long start, end;

                start = pavail_rescan[i].phys_addr;
                end = start + pavail_rescan[i].reg_size;

                if (paddr >= start && paddr < end)
                        return 1;
        }
        if (paddr >= kern_base && paddr < (kern_base + kern_size))
                return 1;
#ifdef CONFIG_BLK_DEV_INITRD
        if (paddr >= __pa(initrd_start) &&
            paddr < __pa(PAGE_ALIGN(initrd_end)))
                return 1;
#endif
        return 0;
}
void __init mem_init(void)
{
        unsigned long codepages, datapages, initpages;
        unsigned long addr, last;
        int i;

        i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
        i += 1;
        sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
        if (sparc64_valid_addr_bitmap == NULL) {
                prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
                prom_halt();
        }
        memset(sparc64_valid_addr_bitmap, 0, i << 3);

        addr = PAGE_OFFSET + kern_base;
        last = PAGE_ALIGN(kern_size) + addr;
        while (addr < last) {
                set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
        }

        taint_real_pages();

        high_memory = __va(last_valid_pfn << PAGE_SHIFT);

        /* We subtract one to account for the mem_map_zero page
         * allocated below.
         */
        totalram_pages = num_physpages = free_all_bootmem() - 1;

        /*
         * Set up the zero page, mark it reserved, so that page count
         * is not manipulated when freeing the page from user ptes.
         */
        mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if (mem_map_zero == NULL) {
                prom_printf("paging_init: Cannot alloc zero page.\n");
                prom_halt();
        }
        SetPageReserved(mem_map_zero);

        codepages = (((unsigned long) _etext) - ((unsigned long) _start));
        codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
        datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
        datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
        initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
        initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

        printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               codepages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10),
               initpages << (PAGE_SHIFT-10),
               PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

        if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cheetah_ecache_flush_init();
}

void free_initmem(void)
{
        unsigned long addr, initend;

        /*
         * The init section is aligned to 8k in vmlinux.lds.  Page align for >8k pagesizes.
         */
        addr = PAGE_ALIGN((unsigned long)(__init_begin));
        initend = (unsigned long)(__init_end) & PAGE_MASK;
        for (; addr < initend; addr += PAGE_SIZE) {
                unsigned long page;
                struct page *p;

                page = (addr +
                        ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                p = virt_to_page(page);

                ClearPageReserved(p);
                init_page_count(p);
                __free_page(p);
                num_physpages++;
                totalram_pages++;
        }
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                struct page *p = virt_to_page(start);

                ClearPageReserved(p);
                init_page_count(p);
                __free_page(p);
                num_physpages++;
                totalram_pages++;
        }
}
#endif
#define _PAGE_CACHE_4U  (_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V  (_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U  (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V  (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define VMEMMAP_CHUNK_SHIFT     22
#define VMEMMAP_CHUNK           (1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK      ~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)        (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE    ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
                          sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];
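
/* Worked sizing example (an editor's note, parameter values assumed):
 * with MAX_PHYSADDR_BITS = 42 and 8K pages (PAGE_SHIFT = 13), the
 * physical space covers 2^29 pages; at 8 bytes per table pointer that
 * is 2^32 bytes of vmemmap, which in 4MB (2^22) chunks gives
 * VMEMMAP_SIZE = 2^10 = 1024 vmemmap_table entries.
 */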
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
        unsigned long vstart = (unsigned long) start;
        unsigned long vend = (unsigned long) (start + nr);
        unsigned long phys_start = (vstart - VMEMMAP_BASE);
        unsigned long phys_end = (vend - VMEMMAP_BASE);
        unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
        unsigned long end = VMEMMAP_ALIGN(phys_end);
        unsigned long pte_base;

        pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
                    _PAGE_CP_4U | _PAGE_CV_4U |
                    _PAGE_P_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
                            _PAGE_CP_4V | _PAGE_CV_4V |
                            _PAGE_P_4V | _PAGE_W_4V);

        for (; addr < end; addr += VMEMMAP_CHUNK) {
                unsigned long *vmem_pp =
                        vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
                void *block;

                if (!(*vmem_pp & _PAGE_VALID)) {
                        block = vmemmap_alloc_block(1UL << 22, node);
                        if (!block)
                                return -ENOMEM;

                        *vmem_pp = pte_base | __pa(block);

                        printk(KERN_INFO "[%p-%p] page_structs=%lu "
                               "node=%d entry=%lu/%lu\n", start, block, nr,
                               node,
                               addr >> VMEMMAP_CHUNK_SHIFT,
                               VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
                }
        }
        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void prot_init_common(unsigned long page_none,
                             unsigned long page_shared,
                             unsigned long page_copy,
                             unsigned long page_readonly,
                             unsigned long page_exec_bit)
{
        PAGE_COPY = __pgprot(page_copy);
        PAGE_SHARED = __pgprot(page_shared);

        protection_map[0x0] = __pgprot(page_none);
        protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
        protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
        protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
        protection_map[0x4] = __pgprot(page_readonly);
        protection_map[0x5] = __pgprot(page_readonly);
        protection_map[0x6] = __pgprot(page_copy);
        protection_map[0x7] = __pgprot(page_copy);
        protection_map[0x8] = __pgprot(page_none);
        protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
        protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
        protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
        protection_map[0xc] = __pgprot(page_readonly);
        protection_map[0xd] = __pgprot(page_readonly);
        protection_map[0xe] = __pgprot(page_shared);
        protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
			       _PAGE_CACHE_4U | _PAGE_P_4U |
			       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
			       _PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
				      _PAGE_CACHE_4U | _PAGE_P_4U |
				      __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				      _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
			       _PAGE_CACHE_4V | _PAGE_P_4V |
			       __ACCESS_BITS_4V | __DIRTY_BITS_4V |
			       _PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
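
/* Translate a mapping size in bytes into the matching TTE size-field
 * encoding, picking the sun4v or sun4u encoding based on tlb_type.
 * Unrecognized sizes fall back to the base 8K page.
 */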
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
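
/* Build a PTE for an I/O mapping: the protections are forced
 * non-cacheable via pgprot_noncached(), the I/O space number is ORed
 * in above bit 32 of the address, and the TTE size field is derived
 * from page_size.
 */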
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
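
/* TTE bits for the 4MB mappings that cover the kernel image: valid,
 * privileged, cacheable, writable and executable.  On sun4u the entry
 * also carries the lock bit; the sun4v TTE format has no lock bit.
 */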
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* Zap every TLB entry that is not locked; interrupts are disabled
 * (PSTATE_IE cleared) for the duration of the flush.
 */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

#ifdef CONFIG_MEMORY_HOTPLUG
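/* Hand a hot-added page over to the buddy allocator: clear the
 * reserved flag, reset the reference count, free the page, then
 * account for the new memory.
 */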
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);

	totalram_pages++;
	num_physpages++;
}

#endif /* CONFIG_MEMORY_HOTPLUG */