  1. /* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
  2. * arch/sparc64/mm/init.c
  3. *
  4. * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
  5. * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  6. */
  7. #include <linux/config.h>
  8. #include <linux/kernel.h>
  9. #include <linux/sched.h>
  10. #include <linux/string.h>
  11. #include <linux/init.h>
  12. #include <linux/bootmem.h>
  13. #include <linux/mm.h>
  14. #include <linux/hugetlb.h>
  15. #include <linux/slab.h>
  16. #include <linux/initrd.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/fs.h>
  20. #include <linux/seq_file.h>
  21. #include <linux/kprobes.h>
  22. #include <linux/cache.h>
  23. #include <linux/sort.h>
  24. #include <asm/head.h>
  25. #include <asm/system.h>
  26. #include <asm/page.h>
  27. #include <asm/pgalloc.h>
  28. #include <asm/pgtable.h>
  29. #include <asm/oplib.h>
  30. #include <asm/iommu.h>
  31. #include <asm/io.h>
  32. #include <asm/uaccess.h>
  33. #include <asm/mmu_context.h>
  34. #include <asm/tlbflush.h>
  35. #include <asm/dma.h>
  36. #include <asm/starfire.h>
  37. #include <asm/tlb.h>
  38. #include <asm/spitfire.h>
  39. #include <asm/sections.h>
  40. #include <asm/tsb.h>
  41. #include <asm/hypervisor.h>
  42. extern void device_scan(void);
  43. #define MAX_BANKS 32
  44. static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
  45. static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
  46. static int pavail_ents __initdata;
  47. static int pavail_rescan_ents __initdata;
  48. static int cmp_p64(const void *a, const void *b)
  49. {
  50. const struct linux_prom64_registers *x = a, *y = b;
  51. if (x->phys_addr > y->phys_addr)
  52. return 1;
  53. if (x->phys_addr < y->phys_addr)
  54. return -1;
  55. return 0;
  56. }
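/* read_obp_memory() below reads one of the /memory node properties
 * ("available" or "reg") from OBP into a fixed array of at most MAX_BANKS
 * entries, page-aligns every bank (shrinking the size when the base had to
 * be rounded up), and sorts the banks by ascending physical address with
 * cmp_p64() so later scans can rely on the ordering.
 */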
  57. static void __init read_obp_memory(const char *property,
  58. struct linux_prom64_registers *regs,
  59. int *num_ents)
  60. {
  61. int node = prom_finddevice("/memory");
  62. int prop_size = prom_getproplen(node, property);
  63. int ents, ret, i;
  64. ents = prop_size / sizeof(struct linux_prom64_registers);
  65. if (ents > MAX_BANKS) {
  66. prom_printf("The machine has more %s property entries than "
  67. "this kernel can support (%d).\n",
  68. property, MAX_BANKS);
  69. prom_halt();
  70. }
  71. ret = prom_getproperty(node, property, (char *) regs, prop_size);
  72. if (ret == -1) {
  73. prom_printf("Couldn't get %s property from /memory.\n");
  74. prom_halt();
  75. }
  76. *num_ents = ents;
  77. /* Sanitize what we got from the firmware, by page aligning
  78. * everything.
  79. */
  80. for (i = 0; i < ents; i++) {
  81. unsigned long base, size;
  82. base = regs[i].phys_addr;
  83. size = regs[i].reg_size;
  84. size &= PAGE_MASK;
  85. if (base & ~PAGE_MASK) {
  86. unsigned long new_base = PAGE_ALIGN(base);
  87. size -= new_base - base;
  88. if ((long) size < 0L)
  89. size = 0UL;
  90. base = new_base;
  91. }
  92. regs[i].phys_addr = base;
  93. regs[i].reg_size = size;
  94. }
  95. sort(regs, ents, sizeof(struct linux_prom64_registers),
  96. cmp_p64, NULL);
  97. }
  98. unsigned long *sparc64_valid_addr_bitmap __read_mostly;
  99. /* Ugly, but necessary... -DaveM */
  100. unsigned long phys_base __read_mostly;
  101. unsigned long kern_base __read_mostly;
  102. unsigned long kern_size __read_mostly;
  103. unsigned long pfn_base __read_mostly;
  104. /* get_new_mmu_context() uses "cache + 1". */
  105. DEFINE_SPINLOCK(ctx_alloc_lock);
  106. unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
  107. #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
  108. unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
  109. /* References to special section boundaries */
  110. extern char _start[], _end[];
  111. /* Initial ramdisk setup */
  112. extern unsigned long sparc_ramdisk_image64;
  113. extern unsigned int sparc_ramdisk_image;
  114. extern unsigned int sparc_ramdisk_size;
  115. struct page *mem_map_zero __read_mostly;
  116. unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
  117. unsigned long sparc64_kern_pri_context __read_mostly;
  118. unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
  119. unsigned long sparc64_kern_sec_context __read_mostly;
  120. int bigkernel = 0;
  121. kmem_cache_t *pgtable_cache __read_mostly;
  122. static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
  123. {
  124. clear_page(addr);
  125. }
  126. void pgtable_cache_init(void)
  127. {
  128. pgtable_cache = kmem_cache_create("pgtable_cache",
  129. PAGE_SIZE, PAGE_SIZE,
  130. SLAB_HWCACHE_ALIGN |
  131. SLAB_MUST_HWCACHE_ALIGN,
  132. zero_ctor,
  133. NULL);
  134. if (!pgtable_cache) {
  135. prom_printf("pgtable_cache_init(): Could not create!\n");
  136. prom_halt();
  137. }
  138. }
  139. #ifdef CONFIG_DEBUG_DCFLUSH
  140. atomic_t dcpage_flushes = ATOMIC_INIT(0);
  141. #ifdef CONFIG_SMP
  142. atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
  143. #endif
  144. #endif
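/* flush_dcache_page_impl() is the per-page flush primitive.  With
 * DCACHE_ALIASING_POSSIBLE the page is flushed via its kernel virtual
 * address (the second argument to __flush_dcache_page() appears to request
 * an additional I-cache flush on Spitfire when the page has a mapping);
 * without aliasing only the Spitfire I-cache flush of mapped pages is needed.
 */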
  145. __inline__ void flush_dcache_page_impl(struct page *page)
  146. {
  147. #ifdef CONFIG_DEBUG_DCFLUSH
  148. atomic_inc(&dcpage_flushes);
  149. #endif
  150. #ifdef DCACHE_ALIASING_POSSIBLE
  151. __flush_dcache_page(page_address(page),
  152. ((tlb_type == spitfire) &&
  153. page_mapping(page) != NULL));
  154. #else
  155. if (page_mapping(page) != NULL &&
  156. tlb_type == spitfire)
  157. __flush_icache_page(__pa(page_address(page)));
  158. #endif
  159. }
  160. #define PG_dcache_dirty PG_arch_1
  161. #define PG_dcache_cpu_shift 24
  162. #define PG_dcache_cpu_mask (256 - 1)
  163. #if NR_CPUS > 256
  164. #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
  165. #endif
  166. #define dcache_dirty_cpu(page) \
  167. (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
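/* The two helpers below maintain the lazy D-cache state kept in page->flags:
 * a PG_dcache_dirty bit plus the owning cpu number stored above
 * PG_dcache_cpu_shift.  The casx loop in set_dcache_dirty() behaves roughly
 * like this (illustrative pseudocode only; cmpxchg() stands in for the
 * hand-written casx retry loop):
 *
 *     do {
 *         old = page->flags;
 *         new = (old & ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift)) |
 *               ((unsigned long) this_cpu << PG_dcache_cpu_shift) |
 *               (1UL << PG_dcache_dirty);
 *     } while (cmpxchg(&page->flags, old, new) != old);
 *
 * clear_dcache_dirty_cpu() clears the dirty bit the same way, but only when
 * the recorded cpu still matches the caller's 'cpu' argument.
 */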
  168. static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
  169. {
  170. unsigned long mask = this_cpu;
  171. unsigned long non_cpu_bits;
  172. non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
  173. mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
  174. __asm__ __volatile__("1:\n\t"
  175. "ldx [%2], %%g7\n\t"
  176. "and %%g7, %1, %%g1\n\t"
  177. "or %%g1, %0, %%g1\n\t"
  178. "casx [%2], %%g7, %%g1\n\t"
  179. "cmp %%g7, %%g1\n\t"
  180. "membar #StoreLoad | #StoreStore\n\t"
  181. "bne,pn %%xcc, 1b\n\t"
  182. " nop"
  183. : /* no outputs */
  184. : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
  185. : "g1", "g7");
  186. }
  187. static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
  188. {
  189. unsigned long mask = (1UL << PG_dcache_dirty);
  190. __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
  191. "1:\n\t"
  192. "ldx [%2], %%g7\n\t"
  193. "srlx %%g7, %4, %%g1\n\t"
  194. "and %%g1, %3, %%g1\n\t"
  195. "cmp %%g1, %0\n\t"
  196. "bne,pn %%icc, 2f\n\t"
  197. " andn %%g7, %1, %%g1\n\t"
  198. "casx [%2], %%g7, %%g1\n\t"
  199. "cmp %%g7, %%g1\n\t"
  200. "membar #StoreLoad | #StoreStore\n\t"
  201. "bne,pn %%xcc, 1b\n\t"
  202. " nop\n"
  203. "2:"
  204. : /* no outputs */
  205. : "r" (cpu), "r" (mask), "r" (&page->flags),
  206. "i" (PG_dcache_cpu_mask),
  207. "i" (PG_dcache_cpu_shift)
  208. : "g1", "g7");
  209. }
  210. static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
  211. {
  212. unsigned long tsb_addr = (unsigned long) ent;
  213. if (tlb_type == cheetah_plus)
  214. tsb_addr = __pa(tsb_addr);
  215. __tsb_insert(tsb_addr, tag, pte);
  216. }
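/* update_mmu_cache() runs after a PTE has been installed.  It first resolves
 * any pending lazy D-cache flush recorded in page->flags (locally, or via
 * smp_flush_dcache_page_impl() on the owning cpu), and for base-page-size
 * PTEs it preloads the mm's TSB entry:
 *
 *     index = (address >> PAGE_SHIFT) & (tsb_nentries - 1)
 *     tag   = (address >> 22) | (CTX_HWBITS(mm->context) << 48)
 *
 * On cheetah_plus the TSB is written through its physical address, see
 * tsb_insert() above.
 */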
  217. void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
  218. {
  219. struct mm_struct *mm;
  220. struct page *page;
  221. unsigned long pfn;
  222. unsigned long pg_flags;
  223. pfn = pte_pfn(pte);
  224. if (pfn_valid(pfn) &&
  225. (page = pfn_to_page(pfn), page_mapping(page)) &&
  226. ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
  227. int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
  228. PG_dcache_cpu_mask);
  229. int this_cpu = get_cpu();
  230. /* This is just to optimize away some function calls
  231. * in the SMP case.
  232. */
  233. if (cpu == this_cpu)
  234. flush_dcache_page_impl(page);
  235. else
  236. smp_flush_dcache_page_impl(page, cpu);
  237. clear_dcache_dirty_cpu(page, cpu);
  238. put_cpu();
  239. }
  240. mm = vma->vm_mm;
  241. if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
  242. struct tsb *tsb;
  243. unsigned long tag;
  244. tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
  245. (mm->context.tsb_nentries - 1UL)];
  246. tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
  247. tsb_insert(tsb, tag, pte_val(pte));
  248. }
  249. }
  250. void flush_dcache_page(struct page *page)
  251. {
  252. struct address_space *mapping;
  253. int this_cpu;
  254. /* Do not bother with the expensive D-cache flush if it
  255. * is merely the zero page. The 'bigcore' testcase in GDB
  256. * causes this case to run millions of times.
  257. */
  258. if (page == ZERO_PAGE(0))
  259. return;
  260. this_cpu = get_cpu();
  261. mapping = page_mapping(page);
  262. if (mapping && !mapping_mapped(mapping)) {
  263. int dirty = test_bit(PG_dcache_dirty, &page->flags);
  264. if (dirty) {
  265. int dirty_cpu = dcache_dirty_cpu(page);
  266. if (dirty_cpu == this_cpu)
  267. goto out;
  268. smp_flush_dcache_page_impl(page, dirty_cpu);
  269. }
  270. set_dcache_dirty(page, this_cpu);
  271. } else {
  272. /* We could delay the flush for the !page_mapping
  273. * case too. But that case is for exec env/arg
  274. * pages and those are 99% certain to get
  275. * faulted into the tlb (and thus flushed) anyways.
  276. */
  277. flush_dcache_page_impl(page);
  278. }
  279. out:
  280. put_cpu();
  281. }
  282. void __kprobes flush_icache_range(unsigned long start, unsigned long end)
  283. {
  284. /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
  285. if (tlb_type == spitfire) {
  286. unsigned long kaddr;
  287. for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
  288. __flush_icache_page(__get_phys(kaddr));
  289. }
  290. }
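/* With a single contiguous mem_map[] whose first entry corresponds to
 * pfn_base, the two helpers below are plain offset conversions,
 * e.g. mem_map + 10 <-> pfn_base + 10.
 */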
  291. unsigned long page_to_pfn(struct page *page)
  292. {
  293. return (unsigned long) ((page - mem_map) + pfn_base);
  294. }
  295. struct page *pfn_to_page(unsigned long pfn)
  296. {
  297. return (mem_map + (pfn - pfn_base));
  298. }
  299. void show_mem(void)
  300. {
  301. printk("Mem-info:\n");
  302. show_free_areas();
  303. printk("Free swap: %6ldkB\n",
  304. nr_swap_pages << (PAGE_SHIFT-10));
  305. printk("%ld pages of RAM\n", num_physpages);
  306. printk("%d free pages\n", nr_free_pages());
  307. }
  308. void mmu_info(struct seq_file *m)
  309. {
  310. if (tlb_type == cheetah)
  311. seq_printf(m, "MMU Type\t: Cheetah\n");
  312. else if (tlb_type == cheetah_plus)
  313. seq_printf(m, "MMU Type\t: Cheetah+\n");
  314. else if (tlb_type == spitfire)
  315. seq_printf(m, "MMU Type\t: Spitfire\n");
  316. else if (tlb_type == hypervisor)
  317. seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
  318. else
  319. seq_printf(m, "MMU Type\t: ???\n");
  320. #ifdef CONFIG_DEBUG_DCFLUSH
  321. seq_printf(m, "DCPageFlushes\t: %d\n",
  322. atomic_read(&dcpage_flushes));
  323. #ifdef CONFIG_SMP
  324. seq_printf(m, "DCPageFlushesXC\t: %d\n",
  325. atomic_read(&dcpage_flushes_xcall));
  326. #endif /* CONFIG_SMP */
  327. #endif /* CONFIG_DEBUG_DCFLUSH */
  328. }
  329. struct linux_prom_translation {
  330. unsigned long virt;
  331. unsigned long size;
  332. unsigned long data;
  333. };
  334. /* Exported for kernel TLB miss handling in ktlb.S */
  335. struct linux_prom_translation prom_trans[512] __read_mostly;
  336. unsigned int prom_trans_ents __read_mostly;
  337. extern unsigned long prom_boot_page;
  338. extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
  339. extern int prom_get_mmu_ihandle(void);
  340. extern void register_prom_callbacks(void);
  341. /* Exported for SMP bootup purposes. */
  342. unsigned long kern_locked_tte_data;
  343. /*
  344. * Translate a PROM mapping captured at boot time into a physical address.
  345. * The second parameter is only set from prom_callback() invocations.
  346. */
  347. unsigned long prom_virt_to_phys(unsigned long promva, int *error)
  348. {
  349. int i;
  350. for (i = 0; i < prom_trans_ents; i++) {
  351. struct linux_prom_translation *p = &prom_trans[i];
  352. if (promva >= p->virt &&
  353. promva < (p->virt + p->size)) {
  354. unsigned long base = p->data & _PAGE_PADDR;
  355. if (error)
  356. *error = 0;
  357. return base + (promva & (8192 - 1));
  358. }
  359. }
  360. if (error)
  361. *error = 1;
  362. return 0UL;
  363. }
  364. /* The obp translations are saved based on 8k pagesize, since obp can
  365. * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
  366. * HI_OBP_ADDRESS range are handled in ktlb.S.
  367. */
  368. static inline int in_obp_range(unsigned long vaddr)
  369. {
  370. return (vaddr >= LOW_OBP_ADDRESS &&
  371. vaddr < HI_OBP_ADDRESS);
  372. }
  373. static int cmp_ptrans(const void *a, const void *b)
  374. {
  375. const struct linux_prom_translation *x = a, *y = b;
  376. if (x->virt > y->virt)
  377. return 1;
  378. if (x->virt < y->virt)
  379. return -1;
  380. return 0;
  381. }
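/* read_obp_translations() below fills prom_trans[] from the /virtual-memory
 * "translations" property, sorts the entries by virtual address, keeps only
 * those falling in the LOW_OBP_ADDRESS..HI_OBP_ADDRESS window, compacts them
 * to the front of the array and zeroes the remainder, then strips the
 * diagnostic TTE bits on Spitfire.
 */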
  382. /* Read OBP translations property into 'prom_trans[]'. */
  383. static void __init read_obp_translations(void)
  384. {
  385. int n, node, ents, first, last, i;
  386. node = prom_finddevice("/virtual-memory");
  387. n = prom_getproplen(node, "translations");
  388. if (unlikely(n == 0 || n == -1)) {
  389. prom_printf("prom_mappings: Couldn't get size.\n");
  390. prom_halt();
  391. }
  392. if (unlikely(n > sizeof(prom_trans))) {
  393. prom_printf("prom_mappings: Size %Zd is too big.\n", n);
  394. prom_halt();
  395. }
  396. if ((n = prom_getproperty(node, "translations",
  397. (char *)&prom_trans[0],
  398. sizeof(prom_trans))) == -1) {
  399. prom_printf("prom_mappings: Couldn't get property.\n");
  400. prom_halt();
  401. }
  402. n = n / sizeof(struct linux_prom_translation);
  403. ents = n;
  404. sort(prom_trans, ents, sizeof(struct linux_prom_translation),
  405. cmp_ptrans, NULL);
  406. /* Now kick out all the non-OBP entries. */
  407. for (i = 0; i < ents; i++) {
  408. if (in_obp_range(prom_trans[i].virt))
  409. break;
  410. }
  411. first = i;
  412. for (; i < ents; i++) {
  413. if (!in_obp_range(prom_trans[i].virt))
  414. break;
  415. }
  416. last = i;
  417. for (i = 0; i < (last - first); i++) {
  418. struct linux_prom_translation *src = &prom_trans[i + first];
  419. struct linux_prom_translation *dest = &prom_trans[i];
  420. *dest = *src;
  421. }
  422. for (; i < ents; i++) {
  423. struct linux_prom_translation *dest = &prom_trans[i];
  424. dest->virt = dest->size = dest->data = 0x0UL;
  425. }
  426. prom_trans_ents = last - first;
  427. if (tlb_type == spitfire) {
  428. /* Clear diag TTE bits. */
  429. for (i = 0; i < prom_trans_ents; i++)
  430. prom_trans[i].data &= ~0x0003fe0000000000UL;
  431. }
  432. }
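/* hypervisor_tlb_lock() issues the sun4v fast trap HV_FAST_MMU_MAP_PERM_ADDR
 * ("ta 0x80" with the function number in %o5 and the arguments in %o0-%o3)
 * to install a permanent I- or D-TLB mapping.  remap_kernel() uses it to
 * lock the kernel's 4MB translations when running under the hypervisor.
 */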
  433. static void __init hypervisor_tlb_lock(unsigned long vaddr,
  434. unsigned long pte,
  435. unsigned long mmu)
  436. {
  437. register unsigned long func asm("%o5");
  438. register unsigned long arg0 asm("%o0");
  439. register unsigned long arg1 asm("%o1");
  440. register unsigned long arg2 asm("%o2");
  441. register unsigned long arg3 asm("%o3");
  442. func = HV_FAST_MMU_MAP_PERM_ADDR;
  443. arg0 = vaddr;
  444. arg1 = 0;
  445. arg2 = pte;
  446. arg3 = mmu;
  447. __asm__ __volatile__("ta 0x80"
  448. : "=&r" (func), "=&r" (arg0),
  449. "=&r" (arg1), "=&r" (arg2),
  450. "=&r" (arg3)
  451. : "0" (func), "1" (arg0), "2" (arg1),
  452. "3" (arg2), "4" (arg3));
  453. }
  454. static void __init remap_kernel(void)
  455. {
  456. unsigned long phys_page, tte_vaddr, tte_data;
  457. int tlb_ent = sparc64_highest_locked_tlbent();
  458. tte_vaddr = (unsigned long) KERNBASE;
  459. phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
  460. tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
  461. _PAGE_CP | _PAGE_CV | _PAGE_P |
  462. _PAGE_L | _PAGE_W));
  463. kern_locked_tte_data = tte_data;
  464. /* Now lock us into the TLBs via Hypervisor or OBP. */
  465. if (tlb_type == hypervisor) {
  466. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
  467. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
  468. if (bigkernel) {
  469. tte_vaddr += 0x400000;
  470. tte_data += 0x400000;
  471. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
  472. hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
  473. }
  474. } else {
  475. prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
  476. prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
  477. if (bigkernel) {
  478. tlb_ent -= 1;
  479. prom_dtlb_load(tlb_ent,
  480. tte_data + 0x400000,
  481. tte_vaddr + 0x400000);
  482. prom_itlb_load(tlb_ent,
  483. tte_data + 0x400000,
  484. tte_vaddr + 0x400000);
  485. }
  486. sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
  487. }
  488. if (tlb_type == cheetah_plus) {
  489. sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
  490. CTX_CHEETAH_PLUS_NUC);
  491. sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
  492. sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
  493. }
  494. }
  495. static void __init inherit_prom_mappings(void)
  496. {
  497. read_obp_translations();
  498. /* Now fixup OBP's idea about where we really are mapped. */
  499. prom_printf("Remapping the kernel... ");
  500. remap_kernel();
  501. prom_printf("done.\n");
  502. prom_printf("Registering callbacks... ");
  503. register_prom_callbacks();
  504. prom_printf("done.\n");
  505. }
  506. void prom_world(int enter)
  507. {
  508. if (!enter)
  509. set_fs((mm_segment_t) { get_thread_current_ds() });
  510. __asm__ __volatile__("flushw");
  511. }
  512. #ifdef DCACHE_ALIASING_POSSIBLE
  513. void __flush_dcache_range(unsigned long start, unsigned long end)
  514. {
  515. unsigned long va;
  516. if (tlb_type == spitfire) {
  517. int n = 0;
  518. for (va = start; va < end; va += 32) {
  519. spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
  520. if (++n >= 512)
  521. break;
  522. }
  523. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  524. start = __pa(start);
  525. end = __pa(end);
  526. for (va = start; va < end; va += 32)
  527. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  528. "membar #Sync"
  529. : /* no outputs */
  530. : "r" (va),
  531. "i" (ASI_DCACHE_INVALIDATE));
  532. }
  533. }
  534. #endif /* DCACHE_ALIASING_POSSIBLE */
  535. /* If not locked, zap it. */
  536. void __flush_tlb_all(void)
  537. {
  538. unsigned long pstate;
  539. int i;
  540. __asm__ __volatile__("flushw\n\t"
  541. "rdpr %%pstate, %0\n\t"
  542. "wrpr %0, %1, %%pstate"
  543. : "=r" (pstate)
  544. : "i" (PSTATE_IE));
  545. if (tlb_type == spitfire) {
  546. for (i = 0; i < 64; i++) {
  547. /* Spitfire Errata #32 workaround */
  548. /* NOTE: Always runs on spitfire, so no
  549. * cheetah+ page size encodings.
  550. */
  551. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  552. "flush %%g6"
  553. : /* No outputs */
  554. : "r" (0),
  555. "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
  556. if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
  557. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  558. "membar #Sync"
  559. : /* no outputs */
  560. : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
  561. spitfire_put_dtlb_data(i, 0x0UL);
  562. }
  563. /* Spitfire Errata #32 workaround */
  564. /* NOTE: Always runs on spitfire, so no
  565. * cheetah+ page size encodings.
  566. */
  567. __asm__ __volatile__("stxa %0, [%1] %2\n\t"
  568. "flush %%g6"
  569. : /* No outputs */
  570. : "r" (0),
  571. "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
  572. if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
  573. __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
  574. "membar #Sync"
  575. : /* no outputs */
  576. : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
  577. spitfire_put_itlb_data(i, 0x0UL);
  578. }
  579. }
  580. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  581. cheetah_flush_dtlb_all();
  582. cheetah_flush_itlb_all();
  583. }
  584. __asm__ __volatile__("wrpr %0, 0, %%pstate"
  585. : : "r" (pstate));
  586. }
  587. /* Caller does TLB context flushing on local CPU if necessary.
  588. * The caller also ensures that CTX_VALID(mm->context) is false.
  589. *
  590. * We must be careful about boundary cases so that we never
  591. * let the user have CTX 0 (nucleus) or we ever use a CTX
  592. * version of zero (and thus NO_CONTEXT would not be caught
  593. * by version mis-match tests in mmu_context.h).
  594. */
  595. void get_new_mmu_context(struct mm_struct *mm)
  596. {
  597. unsigned long ctx, new_ctx;
  598. unsigned long orig_pgsz_bits;
  599. spin_lock(&ctx_alloc_lock);
  600. orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
  601. ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
  602. new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
  603. if (new_ctx >= (1 << CTX_NR_BITS)) {
  604. new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
  605. if (new_ctx >= ctx) {
  606. int i;
  607. new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
  608. CTX_FIRST_VERSION;
  609. if (new_ctx == 1)
  610. new_ctx = CTX_FIRST_VERSION;
  611. /* Don't call memset, for 16 entries that's just
  612. * plain silly...
  613. */
  614. mmu_context_bmap[0] = 3;
  615. mmu_context_bmap[1] = 0;
  616. mmu_context_bmap[2] = 0;
  617. mmu_context_bmap[3] = 0;
  618. for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
  619. mmu_context_bmap[i + 0] = 0;
  620. mmu_context_bmap[i + 1] = 0;
  621. mmu_context_bmap[i + 2] = 0;
  622. mmu_context_bmap[i + 3] = 0;
  623. }
  624. goto out;
  625. }
  626. }
  627. mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
  628. new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
  629. out:
  630. tlb_context_cache = new_ctx;
  631. mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
  632. spin_unlock(&ctx_alloc_lock);
  633. }
  634. void sparc_ultra_dump_itlb(void)
  635. {
  636. int slot;
  637. if (tlb_type == spitfire) {
  638. printk ("Contents of itlb: ");
  639. for (slot = 0; slot < 14; slot++) printk (" ");
  640. printk ("%2x:%016lx,%016lx\n",
  641. 0,
  642. spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
  643. for (slot = 1; slot < 64; slot+=3) {
  644. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  645. slot,
  646. spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
  647. slot+1,
  648. spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
  649. slot+2,
  650. spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
  651. }
  652. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  653. printk ("Contents of itlb0:\n");
  654. for (slot = 0; slot < 16; slot+=2) {
  655. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  656. slot,
  657. cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
  658. slot+1,
  659. cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
  660. }
  661. printk ("Contents of itlb2:\n");
  662. for (slot = 0; slot < 128; slot+=2) {
  663. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  664. slot,
  665. cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
  666. slot+1,
  667. cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
  668. }
  669. }
  670. }
  671. void sparc_ultra_dump_dtlb(void)
  672. {
  673. int slot;
  674. if (tlb_type == spitfire) {
  675. printk ("Contents of dtlb: ");
  676. for (slot = 0; slot < 14; slot++) printk (" ");
  677. printk ("%2x:%016lx,%016lx\n", 0,
  678. spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
  679. for (slot = 1; slot < 64; slot+=3) {
  680. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  681. slot,
  682. spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
  683. slot+1,
  684. spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
  685. slot+2,
  686. spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
  687. }
  688. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  689. printk ("Contents of dtlb0:\n");
  690. for (slot = 0; slot < 16; slot+=2) {
  691. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  692. slot,
  693. cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
  694. slot+1,
  695. cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
  696. }
  697. printk ("Contents of dtlb2:\n");
  698. for (slot = 0; slot < 512; slot+=2) {
  699. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  700. slot,
  701. cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
  702. slot+1,
  703. cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
  704. }
  705. if (tlb_type == cheetah_plus) {
  706. printk ("Contents of dtlb3:\n");
  707. for (slot = 0; slot < 512; slot+=2) {
  708. printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
  709. slot,
  710. cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
  711. slot+1,
  712. cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
  713. }
  714. }
  715. }
  716. }
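/* bootmem_init() below walks pavail[], trimming it when a memory-size limit
 * (cmdline_memory_size, typically from the mem= option) was given, places the
 * bootmem bitmap just above the kernel image (skipping past the initrd if the
 * two would collide), hands every available bank to free_bootmem(), and then
 * reserves the initrd, the kernel text/data/bss, and the bootmem map itself.
 */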
  717. extern unsigned long cmdline_memory_size;
  718. unsigned long __init bootmem_init(unsigned long *pages_avail)
  719. {
  720. unsigned long bootmap_size, start_pfn, end_pfn;
  721. unsigned long end_of_phys_memory = 0UL;
  722. unsigned long bootmap_pfn, bytes_avail, size;
  723. int i;
  724. #ifdef CONFIG_DEBUG_BOOTMEM
  725. prom_printf("bootmem_init: Scan pavail, ");
  726. #endif
  727. bytes_avail = 0UL;
  728. for (i = 0; i < pavail_ents; i++) {
  729. end_of_phys_memory = pavail[i].phys_addr +
  730. pavail[i].reg_size;
  731. bytes_avail += pavail[i].reg_size;
  732. if (cmdline_memory_size) {
  733. if (bytes_avail > cmdline_memory_size) {
  734. unsigned long slack = bytes_avail - cmdline_memory_size;
  735. bytes_avail -= slack;
  736. end_of_phys_memory -= slack;
  737. pavail[i].reg_size -= slack;
  738. if ((long)pavail[i].reg_size <= 0L) {
  739. pavail[i].phys_addr = 0xdeadbeefUL;
  740. pavail[i].reg_size = 0UL;
  741. pavail_ents = i;
  742. } else {
  743. pavail[i+1].reg_size = 0UL;
  744. pavail[i+1].phys_addr = 0xdeadbeefUL;
  745. pavail_ents = i + 1;
  746. }
  747. break;
  748. }
  749. }
  750. }
  751. *pages_avail = bytes_avail >> PAGE_SHIFT;
  752. /* Start with page aligned address of last symbol in kernel
  753. * image. The kernel is hard mapped below PAGE_OFFSET in a
  754. * 4MB locked TLB translation.
  755. */
  756. start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
  757. bootmap_pfn = start_pfn;
  758. end_pfn = end_of_phys_memory >> PAGE_SHIFT;
  759. #ifdef CONFIG_BLK_DEV_INITRD
  760. /* Check the initial ramdisk now, so that the bootmap does not overwrite it. */
  761. if (sparc_ramdisk_image || sparc_ramdisk_image64) {
  762. unsigned long ramdisk_image = sparc_ramdisk_image ?
  763. sparc_ramdisk_image : sparc_ramdisk_image64;
  764. if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
  765. ramdisk_image -= KERNBASE;
  766. initrd_start = ramdisk_image + phys_base;
  767. initrd_end = initrd_start + sparc_ramdisk_size;
  768. if (initrd_end > end_of_phys_memory) {
  769. printk(KERN_CRIT "initrd extends beyond end of memory "
  770. "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
  771. initrd_end, end_of_phys_memory);
  772. initrd_start = 0;
  773. }
  774. if (initrd_start) {
  775. if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
  776. initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
  777. bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
  778. }
  779. }
  780. #endif
  781. /* Initialize the boot-time allocator. */
  782. max_pfn = max_low_pfn = end_pfn;
  783. min_low_pfn = pfn_base;
  784. #ifdef CONFIG_DEBUG_BOOTMEM
  785. prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
  786. min_low_pfn, bootmap_pfn, max_low_pfn);
  787. #endif
  788. bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
  789. /* Now register the available physical memory with the
  790. * allocator.
  791. */
  792. for (i = 0; i < pavail_ents; i++) {
  793. #ifdef CONFIG_DEBUG_BOOTMEM
  794. prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
  795. i, pavail[i].phys_addr, pavail[i].reg_size);
  796. #endif
  797. free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
  798. }
  799. #ifdef CONFIG_BLK_DEV_INITRD
  800. if (initrd_start) {
  801. size = initrd_end - initrd_start;
  802. /* Reserve the initrd image area. */
  803. #ifdef CONFIG_DEBUG_BOOTMEM
  804. prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
  805. initrd_start, size);
  806. #endif
  807. reserve_bootmem(initrd_start, size);
  808. *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
  809. initrd_start += PAGE_OFFSET;
  810. initrd_end += PAGE_OFFSET;
  811. }
  812. #endif
  813. /* Reserve the kernel text/data/bss. */
  814. #ifdef CONFIG_DEBUG_BOOTMEM
  815. prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
  816. #endif
  817. reserve_bootmem(kern_base, kern_size);
  818. *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
  819. /* Reserve the bootmem map. We do not account for it
  820. * in pages_avail because we will release that memory
  821. * in free_all_bootmem.
  822. */
  823. size = bootmap_size;
  824. #ifdef CONFIG_DEBUG_BOOTMEM
  825. prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
  826. (bootmap_pfn << PAGE_SHIFT), size);
  827. #endif
  828. reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
  829. *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
  830. return end_pfn;
  831. }
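/* The CONFIG_DEBUG_PAGEALLOC code below builds real page tables for the
 * linear mapping.  kernel_map_range() allocates missing pud/pmd/pte levels
 * from bootmem and writes one PTE per page of [pstart, pend);
 * kernel_map_pages() uses it to map or unmap pages (by installing
 * __pgprot(0)) and then flushes the kernel TSB and the local TLB for the
 * affected range.
 */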
  832. #ifdef CONFIG_DEBUG_PAGEALLOC
  833. static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
  834. {
  835. unsigned long vstart = PAGE_OFFSET + pstart;
  836. unsigned long vend = PAGE_OFFSET + pend;
  837. unsigned long alloc_bytes = 0UL;
  838. if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
  839. prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
  840. vstart, vend);
  841. prom_halt();
  842. }
  843. while (vstart < vend) {
  844. unsigned long this_end, paddr = __pa(vstart);
  845. pgd_t *pgd = pgd_offset_k(vstart);
  846. pud_t *pud;
  847. pmd_t *pmd;
  848. pte_t *pte;
  849. pud = pud_offset(pgd, vstart);
  850. if (pud_none(*pud)) {
  851. pmd_t *new;
  852. new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
  853. alloc_bytes += PAGE_SIZE;
  854. pud_populate(&init_mm, pud, new);
  855. }
  856. pmd = pmd_offset(pud, vstart);
  857. if (!pmd_present(*pmd)) {
  858. pte_t *new;
  859. new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
  860. alloc_bytes += PAGE_SIZE;
  861. pmd_populate_kernel(&init_mm, pmd, new);
  862. }
  863. pte = pte_offset_kernel(pmd, vstart);
  864. this_end = (vstart + PMD_SIZE) & PMD_MASK;
  865. if (this_end > vend)
  866. this_end = vend;
  867. while (vstart < this_end) {
  868. pte_val(*pte) = (paddr | pgprot_val(prot));
  869. vstart += PAGE_SIZE;
  870. paddr += PAGE_SIZE;
  871. pte++;
  872. }
  873. }
  874. return alloc_bytes;
  875. }
  876. static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
  877. static int pall_ents __initdata;
  878. extern unsigned int kvmap_linear_patch[1];
  879. static void __init kernel_physical_mapping_init(void)
  880. {
  881. unsigned long i, mem_alloced = 0UL;
  882. read_obp_memory("reg", &pall[0], &pall_ents);
  883. for (i = 0; i < pall_ents; i++) {
  884. unsigned long phys_start, phys_end;
  885. phys_start = pall[i].phys_addr;
  886. phys_end = phys_start + pall[i].reg_size;
  887. mem_alloced += kernel_map_range(phys_start, phys_end,
  888. PAGE_KERNEL);
  889. }
  890. printk("Allocated %ld bytes for kernel page tables.\n",
  891. mem_alloced);
  892. kvmap_linear_patch[0] = 0x01000000; /* nop */
  893. flushi(&kvmap_linear_patch[0]);
  894. __flush_tlb_all();
  895. }
  896. void kernel_map_pages(struct page *page, int numpages, int enable)
  897. {
  898. unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
  899. unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
  900. kernel_map_range(phys_start, phys_end,
  901. (enable ? PAGE_KERNEL : __pgprot(0)));
  902. flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
  903. PAGE_OFFSET + phys_end);
  904. /* Ideally we would IPI all cpus and flush their TLBs here,
  905. * but that can deadlock, so we only flush the current cpu's TLB.
  906. */
  907. __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
  908. PAGE_OFFSET + phys_end);
  909. }
  910. #endif
  911. unsigned long __init find_ecache_flush_span(unsigned long size)
  912. {
  913. int i;
  914. for (i = 0; i < pavail_ents; i++) {
  915. if (pavail[i].reg_size >= size)
  916. return pavail[i].phys_addr;
  917. }
  918. return ~0UL;
  919. }
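/* tsb_phys_patch() rewrites the TSB access instructions in place so that the
 * TSB is addressed physically: quad-load sites get either the sun4v or the
 * sun4u variant of the instruction, the remaining sites get a single
 * replacement instruction, and every patched word is followed by wmb() plus
 * "flush" to keep the instruction cache coherent.
 */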
  920. static void __init tsb_phys_patch(void)
  921. {
  922. struct tsb_ldquad_phys_patch_entry *pquad;
  923. struct tsb_phys_patch_entry *p;
  924. pquad = &__tsb_ldquad_phys_patch;
  925. while (pquad < &__tsb_ldquad_phys_patch_end) {
  926. unsigned long addr = pquad->addr;
  927. if (tlb_type == hypervisor)
  928. *(unsigned int *) addr = pquad->sun4v_insn;
  929. else
  930. *(unsigned int *) addr = pquad->sun4u_insn;
  931. wmb();
  932. __asm__ __volatile__("flush %0"
  933. : /* no outputs */
  934. : "r" (addr));
  935. pquad++;
  936. }
  937. p = &__tsb_phys_patch;
  938. while (p < &__tsb_phys_patch_end) {
  939. unsigned long addr = p->addr;
  940. *(unsigned int *) addr = p->insn;
  941. wmb();
  942. __asm__ __volatile__("flush %0"
  943. : /* no outputs */
  944. : "r" (addr));
  945. p++;
  946. }
  947. }
  948. /* paging_init() sets up the page tables */
  949. extern void cheetah_ecache_flush_init(void);
  950. extern void sun4v_patch_tlb_handlers(void);
  951. static unsigned long last_valid_pfn;
  952. pgd_t swapper_pg_dir[2048];
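/* paging_init() derives kern_base/kern_size from the PROM boot mapping,
 * patches the TSB and TLB handlers where the cpu type requires it, reads the
 * "available" memory list to find phys_base/pfn_base, biases init_mm.pgd so
 * physical address computations work from the upper alias, captures the OBP
 * translations and remaps the kernel (inherit_prom_mappings), installs the
 * trap table with setup_tba(), runs bootmem_init(), and finally creates a
 * single ZONE_DMA covering all of memory before calling device_scan().
 */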
  953. void __init paging_init(void)
  954. {
  955. unsigned long end_pfn, pages_avail, shift;
  956. unsigned long real_end, i;
  957. kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
  958. kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
  959. if (tlb_type == cheetah_plus ||
  960. tlb_type == hypervisor)
  961. tsb_phys_patch();
  962. if (tlb_type == hypervisor)
  963. sun4v_patch_tlb_handlers();
  964. /* Find available physical memory... */
  965. read_obp_memory("available", &pavail[0], &pavail_ents);
  966. phys_base = 0xffffffffffffffffUL;
  967. for (i = 0; i < pavail_ents; i++)
  968. phys_base = min(phys_base, pavail[i].phys_addr);
  969. pfn_base = phys_base >> PAGE_SHIFT;
  970. set_bit(0, mmu_context_bmap);
  971. shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
  972. real_end = (unsigned long)_end;
  973. if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
  974. bigkernel = 1;
  975. if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
  976. prom_printf("paging_init: Kernel > 8MB, too large.\n");
  977. prom_halt();
  978. }
  979. /* Set kernel pgd to upper alias so physical page computations
  980. * work.
  981. */
  982. init_mm.pgd += ((shift) / (sizeof(pgd_t)));
  983. memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
  984. /* Now can init the kernel/bad page tables. */
  985. pud_set(pud_offset(&swapper_pg_dir[0], 0),
  986. swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
  987. inherit_prom_mappings();
  988. /* Ok, we can use our TLB miss and window trap handlers safely. */
  989. setup_tba();
  990. __flush_tlb_all();
  991. /* Setup bootmem... */
  992. pages_avail = 0;
  993. last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
  994. #ifdef CONFIG_DEBUG_PAGEALLOC
  995. kernel_physical_mapping_init();
  996. #endif
  997. {
  998. unsigned long zones_size[MAX_NR_ZONES];
  999. unsigned long zholes_size[MAX_NR_ZONES];
  1000. unsigned long npages;
  1001. int znum;
  1002. for (znum = 0; znum < MAX_NR_ZONES; znum++)
  1003. zones_size[znum] = zholes_size[znum] = 0;
  1004. npages = end_pfn - pfn_base;
  1005. zones_size[ZONE_DMA] = npages;
  1006. zholes_size[ZONE_DMA] = npages - pages_avail;
  1007. free_area_init_node(0, &contig_page_data, zones_size,
  1008. phys_base >> PAGE_SHIFT, zholes_size);
  1009. }
  1010. device_scan();
  1011. }
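/* taint_real_pages() re-reads the "available" property after the kernel has
 * been remapped and compares it against the original pavail[] snapshot:
 * pages that are still available get their 4MB region marked in
 * sparc64_valid_addr_bitmap, while pages that have disappeared are reserved
 * in bootmem so they are never handed to the page allocator.
 */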
  1012. static void __init taint_real_pages(void)
  1013. {
  1014. int i;
  1015. read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
  1016. /* Find changes discovered in the physmem available rescan and
  1017. * reserve the lost portions in the bootmem maps.
  1018. */
  1019. for (i = 0; i < pavail_ents; i++) {
  1020. unsigned long old_start, old_end;
  1021. old_start = pavail[i].phys_addr;
  1022. old_end = old_start +
  1023. pavail[i].reg_size;
  1024. while (old_start < old_end) {
  1025. int n;
  1026. for (n = 0; n < pavail_rescan_ents; n++) {
  1027. unsigned long new_start, new_end;
  1028. new_start = pavail_rescan[n].phys_addr;
  1029. new_end = new_start +
  1030. pavail_rescan[n].reg_size;
  1031. if (new_start <= old_start &&
  1032. new_end >= (old_start + PAGE_SIZE)) {
  1033. set_bit(old_start >> 22,
  1034. sparc64_valid_addr_bitmap);
  1035. goto do_next_page;
  1036. }
  1037. }
  1038. reserve_bootmem(old_start, PAGE_SIZE);
  1039. do_next_page:
  1040. old_start += PAGE_SIZE;
  1041. }
  1042. }
  1043. }
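/* mem_init() allocates sparc64_valid_addr_bitmap (one bit per 4MB of
 * physical address space up to last_valid_pfn), marks the region holding the
 * kernel image, runs taint_real_pages(), releases bootmem to the page
 * allocator, sets up the reserved zero page (mem_map_zero), and prints the
 * memory banner before running cheetah_ecache_flush_init() on Cheetah
 * systems.
 */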
  1044. void __init mem_init(void)
  1045. {
  1046. unsigned long codepages, datapages, initpages;
  1047. unsigned long addr, last;
  1048. int i;
  1049. i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
  1050. i += 1;
  1051. sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
  1052. if (sparc64_valid_addr_bitmap == NULL) {
  1053. prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
  1054. prom_halt();
  1055. }
  1056. memset(sparc64_valid_addr_bitmap, 0, i << 3);
  1057. addr = PAGE_OFFSET + kern_base;
  1058. last = PAGE_ALIGN(kern_size) + addr;
  1059. while (addr < last) {
  1060. set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
  1061. addr += PAGE_SIZE;
  1062. }
  1063. taint_real_pages();
  1064. max_mapnr = last_valid_pfn - pfn_base;
  1065. high_memory = __va(last_valid_pfn << PAGE_SHIFT);
  1066. #ifdef CONFIG_DEBUG_BOOTMEM
  1067. prom_printf("mem_init: Calling free_all_bootmem().\n");
  1068. #endif
  1069. totalram_pages = num_physpages = free_all_bootmem() - 1;
  1070. /*
  1071. * Set up the zero page, mark it reserved, so that page count
  1072. * is not manipulated when freeing the page from user ptes.
  1073. */
  1074. mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
  1075. if (mem_map_zero == NULL) {
  1076. prom_printf("paging_init: Cannot alloc zero page.\n");
  1077. prom_halt();
  1078. }
  1079. SetPageReserved(mem_map_zero);
  1080. codepages = (((unsigned long) _etext) - ((unsigned long) _start));
  1081. codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
  1082. datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
  1083. datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
  1084. initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
  1085. initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
  1086. printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
  1087. nr_free_pages() << (PAGE_SHIFT-10),
  1088. codepages << (PAGE_SHIFT-10),
  1089. datapages << (PAGE_SHIFT-10),
  1090. initpages << (PAGE_SHIFT-10),
  1091. PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
  1092. if (tlb_type == cheetah || tlb_type == cheetah_plus)
  1093. cheetah_ecache_flush_init();
  1094. }
  1095. void free_initmem(void)
  1096. {
  1097. unsigned long addr, initend;
  1098. /*
  1099. * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
  1100. */
  1101. addr = PAGE_ALIGN((unsigned long)(__init_begin));
  1102. initend = (unsigned long)(__init_end) & PAGE_MASK;
  1103. for (; addr < initend; addr += PAGE_SIZE) {
  1104. unsigned long page;
  1105. struct page *p;
  1106. page = (addr +
  1107. ((unsigned long) __va(kern_base)) -
  1108. ((unsigned long) KERNBASE));
  1109. memset((void *)addr, 0xcc, PAGE_SIZE);
  1110. p = virt_to_page(page);
  1111. ClearPageReserved(p);
  1112. set_page_count(p, 1);
  1113. __free_page(p);
  1114. num_physpages++;
  1115. totalram_pages++;
  1116. }
  1117. }
  1118. #ifdef CONFIG_BLK_DEV_INITRD
  1119. void free_initrd_mem(unsigned long start, unsigned long end)
  1120. {
  1121. if (start < end)
  1122. printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
  1123. for (; start < end; start += PAGE_SIZE) {
  1124. struct page *p = virt_to_page(start);
  1125. ClearPageReserved(p);
  1126. set_page_count(p, 1);
  1127. __free_page(p);
  1128. num_physpages++;
  1129. totalram_pages++;
  1130. }
  1131. }
  1132. #endif