/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init.h"

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
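
/* The bit index for a given physical address is simply paddr >> 28
 * (28 == log2(256MB)).  mark_kpte_bitmap() below sets the bits for
 * the memory banks the firmware reports in its "reg" property.
 */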

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
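
/* D-cache dirty tracking lives in page->flags: PG_arch_1 is the
 * dirty bit, and the number of the cpu that dirtied the page is
 * kept in the bits starting at bit 32 (see the BUILD_BUG_ON()
 * checks in paging_init()).
 */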
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
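
/* Atomically mark the page dirty and record 'this_cpu' as its owner.
 * The casx loop retries until the compare-and-swap of page->flags
 * succeeds, so a racing update cannot be lost.
 */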
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
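
/* On cheetah_plus and sun4v the TSB is accessed via its physical
 * address, so translate the entry pointer before handing it to
 * __tsb_insert().
 */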
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
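
/* Lock a translation into the sun4v ITLB or DTLB (selected by 'mmu')
 * as a permanent mapping, halting if the hypervisor call fails.
 */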
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);
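
/* Lock the kernel image into the TLB.  Each locked entry covers 4MB
 * (hence the 0x400000 stride), and every 4MB chunk of the image gets
 * both an I-TLB and a D-TLB entry.
 */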
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
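
/* Return the end of the longest initial portion of [start, end) that
 * lies entirely within one NUMA node, storing that node's id in *nid.
 * Callers walk a range by repeatedly advancing 'start' to the value
 * returned here.
 */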
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;

	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
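
/* Return 0 if the PIO node 'pio' has a forward arc to a device whose
 * "cfg-handle" property matches 'cfg_handle', else -ENODEV.
 */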
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
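
/* Pick the lowest-latency memory-latency-group reachable from 'grp'
 * and record its address match/mask pair in node_masks[] as NUMA
 * node 'index'.
 */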
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];
	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}

	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx])\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
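
/* Set a bit in kpte_linear_bitmap for every fully aligned 256MB
 * chunk inside [start, end), so the linear mapping TLB miss handler
 * uses a 256MB page (kern_linear_pte_xor[1]) there.  Anything that
 * does not span an aligned 256MB chunk stays on 4MB pages.
 */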
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}
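
/* Rewrite the TSB access sequences recorded in the patch tables so
 * that they use physical addressing, choosing the sun4v or sun4u
 * form of the quad load, then flush the patched instructions from
 * the I-cache.
 */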
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
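
/* Hand the kernel TSB descriptors to the hypervisor for context 0.
 * This is __cpuinit rather than __init because secondary cpus run
 * it as well when they come up.
 */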
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}
  1346. /* paging_init() sets up the page tables */
  1347. static unsigned long last_valid_pfn;
  1348. pgd_t swapper_pg_dir[2048];
  1349. static void sun4u_pgprot_init(void);
  1350. static void sun4v_pgprot_init(void);
  1351. /* Dummy function */
  1352. void __init setup_per_cpu_areas(void)
  1353. {
  1354. }
  1355. void __init paging_init(void)
  1356. {
  1357. unsigned long end_pfn, shift, phys_base;
  1358. unsigned long real_end, i;
  1359. /* These build time checkes make sure that the dcache_dirty_cpu()
  1360. * page->flags usage will work.
  1361. *
  1362. * When a page gets marked as dcache-dirty, we store the
  1363. * cpu number starting at bit 32 in the page->flags. Also,
  1364. * functions like clear_dcache_dirty_cpu use the cpu mask
  1365. * in 13-bit signed-immediate instruction fields.
  1366. */
  1367. /*
  1368. * Page flags must not reach into upper 32 bits that are used
  1369. * for the cpu number
  1370. */
  1371. BUILD_BUG_ON(NR_PAGEFLAGS > 32);
  1372. /*
  1373. * The bit fields placed in the high range must not reach below
  1374. * the 32 bit boundary. Otherwise we cannot place the cpu field
  1375. * at the 32 bit boundary.
  1376. */
  1377. BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
  1378. ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
  1379. BUILD_BUG_ON(NR_CPUS > 4096);
  1380. kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
  1381. kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
  1382. /* Invalidate both kernel TSBs. */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	lmb_init();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	lmb_enforce_memory_limit(cmdline_memory_size);

	lmb_analyze();
	lmb_dump_all();
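
	/* MMU context 0 belongs to the kernel; mark it in use so the
	 * context allocator never hands it out to a user process.
	 */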
	set_bit(0, mmu_context_bmap);
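
	/* Byte offset between the kernel's link address at KERNBASE
	 * and its alias in the linear mapping at PAGE_OFFSET + kern_base.
	 */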
	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now we can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must set up the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been set up, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif
	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list and make sure it provides at least as much memory as
 * 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;
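
	/* One bit of the valid address bitmap covers a 4MB chunk:
	 * pfn >> (22 - PAGE_SHIFT) counts chunks, the further >> 6
	 * counts 64-bit longs, and the << 3 below converts longs to
	 * bytes for the allocation.
	 */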
	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;
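
		/* Translate the KERNBASE-relative image address into
		 * its linear-map alias so that virt_to_page() below
		 * resolves to the right struct page.
		 */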
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U   (_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V   (_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U  (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V  (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
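
/* The TTE bit layout differs between sun4u hardware and the sun4v
 * hypervisor interface, so the generic pgprots below are filled in
 * at boot by sun4u_pgprot_init() or sun4v_pgprot_init().
 */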
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
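
/* Populate the vmemmap with large mappings: each vmemmap_table slot
 * holds one TTE covering a VMEMMAP_CHUNK of the struct page array,
 * backed by a 4MB block from vmemmap_alloc_block().
 */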
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);
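
	/* The index is built from the low vm_flags bits (read, write,
	 * exec), with bit 3 selecting shared mappings: 0x0-0x7 are the
	 * private (copy-on-write) variants, 0x8-0xf their shared
	 * counterparts.
	 */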
	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);
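
	/* kern_linear_pte_xor is XORed with a linear-map virtual
	 * address by the kernel TSB miss handler to produce the TTE
	 * directly; the constant below is PAGE_OFFSET, which the XOR
	 * cancels, leaving the physical address plus the bits set here.
	 */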
#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
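
/* Build a PTE for an I/O mapping; the 'space' argument (apparently a
 * bus space identifier) lands in the physical address bits above
 * bit 32.
 */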
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
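
/* TTE used for the locked 4MB mappings of the kernel image.  The
 * sun4v variant carries no lock bit; presumably the hypervisor keeps
 * the kernel image mappings resident instead.
 */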
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;
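
	/* Flush the register windows, then clear PSTATE_IE (wrpr xors
	 * its two source operands into %pstate) so the TLB flushes run
	 * with interrupts disabled; the saved pstate is restored at
	 * the end.
	 */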
  1871. __asm__ __volatile__("flushw\n\t"
  1872. "rdpr %%pstate, %0\n\t"
  1873. "wrpr %0, %1, %%pstate"
  1874. : "=r" (pstate)
  1875. : "i" (PSTATE_IE));
  1876. if (tlb_type == hypervisor) {
  1877. sun4v_mmu_demap_all();
  1878. } else if (tlb_type == spitfire) {
  1879. for (i = 0; i < 64; i++) {
  1880. /* Spitfire Errata #32 workaround */
  1881. /* NOTE: Always runs on spitfire, so no
  1882. * cheetah+ page size encodings.
  1883. */
  1884. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  1885. "flush %%g6"
  1886. : /* No outputs */
  1887. : "r" (0),
  1888. "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
  1889. if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
  1890. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  1891. "membar #Sync"
  1892. : /* no outputs */
  1893. : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
  1894. spitfire_put_dtlb_data(i, 0x0UL);
  1895. }
  1896. /* Spitfire Errata #32 workaround */
  1897. /* NOTE: Always runs on spitfire, so no
  1898. * cheetah+ page size encodings.
  1899. */
  1900. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  1901. "flush %%g6"
  1902. : /* No outputs */
  1903. : "r" (0),
  1904. "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
  1905. if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
  1906. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  1907. "membar #Sync"
  1908. : /* no outputs */
  1909. : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
  1910. spitfire_put_itlb_data(i, 0x0UL);
  1911. }
  1912. }
  1913. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  1914. cheetah_flush_dtlb_all();
  1915. cheetah_flush_itlb_all();
  1916. }
  1917. __asm__ __volatile__("wrpr %0, 0, %%pstate"
  1918. : : "r" (pstate));
  1919. }