/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
extern void device_scan(void);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base;
unsigned long kern_base;
unsigned long kern_size;
unsigned long pfn_base;

/* This is even uglier. We have a problem where the kernel may not be
 * located at phys_base. However, initial __alloc_bootmem() calls need to
 * be adjusted to be within the 4-8MB that the kernel is mapped to, else
 * those page mappings won't work. Things are OK after inherit_prom_mappings
 * is called though. Dave says he'll clean this up some other time.
 * -- BenC
 */
static unsigned long bootmap_base;

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero;

int bigkernel = 0;

/* XXX Tune this... */
#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50
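
/* Keep the page-table quicklists between the two watermarks above:
 * once pgtable_cache_size grows past PGT_CACHE_HIGH, free cached
 * pgd/pte pages back to the allocator until it drops to PGT_CACHE_LOW.
 */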
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
	preempt_enable();
}

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
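
/* Atomically mark the page D-cache dirty and record which CPU dirtied
 * it: a casx loop rewrites page->flags, replacing the stored CPU number
 * with this_cpu and setting PG_dcache_dirty, retrying on contention.
 */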
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
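
/* Clear PG_dcache_dirty, but only if the recorded dirtying CPU still
 * matches 'cpu'; if another processor has re-dirtied the page in the
 * meantime its flush is still pending, so the flags are left alone.
 */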
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
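
/* Called when a pte is installed: if the backing page has a lazily
 * deferred D-cache flush pending (PG_dcache_dirty, set by
 * flush_dcache_page below), perform that flush now, preferably on the
 * CPU that dirtied it, then hand the new translation to the assembly
 * helper __update_mmu_cache().
 */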
extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
			   PG_dcache_cpu_mask);
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}
	if (get_thread_fault_code())
		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
				   address, pte, get_thread_fault_code());
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certain to get faulted
		 * into the TLB (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}
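
/* mem_map is a single linear array starting at pfn_base, so the
 * page <-> pfn conversions are just offsets from the array base.
 */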
unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

#define BASE_PAGE_SIZE 8192
static pmd_t *prompmd;

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
	pte_t *ptep;
	unsigned long base;

	if (pmd_none(*pmdp)) {
		if (error)
			*error = 1;
		return(0);
	}
	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
	if (!pte_present(*ptep)) {
		if (error)
			*error = 1;
		return(0);
	}
	if (error) {
		*error = 0;
		return(pte_val(*ptep));
	}
	base = pte_val(*ptep) & _PAGE_PADDR;
	return(base + (promva & (BASE_PAGE_SIZE - 1)));
}
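
/* Copy the OBP "translations" property into the private pmd/pte tree
 * above (prompmd), patch the trap-table stubs to point at it, have the
 * firmware remap the kernel to KERNBASE, relock the kernel TTEs via
 * OBP, and finally unmap everything else the firmware holds below
 * 0xf0000000 outside the kernel's locked 4MB window(s).
 */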
static void inherit_prom_mappings(void)
{
	struct linux_prom_translation *trans;
	unsigned long phys_page, tte_vaddr, tte_data;
	void (*remap_func)(unsigned long, unsigned long, int);
	pmd_t *pmdp;
	pte_t *ptep;
	int node, n, i, tsz;
	extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (n == 0 || n == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n += 5 * sizeof(struct linux_prom_translation);
	for (tsz = 1; tsz < n; tsz <<= 1)
		/* empty */;
	trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
	if (trans == NULL) {
		prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
		prom_halt();
	}
	memset(trans, 0, tsz);

	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	/*
	 * The OBP translations are saved based on 8k page size, since OBP
	 * can use a mixture of page sizes. Misses to the 0xf0000000 -
	 * 0x100000000 range, i.e. the OBP range, are handled in entry.S and
	 * do not use the vpte scheme (see rant in
	 * inherit_locked_prom_mappings()).
	 */
#define OBP_PMD_SIZE 2048
	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
	if (prompmd == NULL)
		early_pgtable_allocfail("pmd");
	memset(prompmd, 0, OBP_PMD_SIZE);
	for (i = 0; i < n; i++) {
		unsigned long vaddr;

		if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
			for (vaddr = trans[i].virt;
			     ((vaddr < trans[i].virt + trans[i].size) &&
			      (vaddr < HI_OBP_ADDRESS));
			     vaddr += BASE_PAGE_SIZE) {
				unsigned long val;

				pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
				if (pmd_none(*pmdp)) {
					ptep = __alloc_bootmem(BASE_PAGE_SIZE,
							       BASE_PAGE_SIZE,
							       bootmap_base);
					if (ptep == NULL)
						early_pgtable_allocfail("pte");
					memset(ptep, 0, BASE_PAGE_SIZE);
					pmd_set(pmdp, ptep);
				}
				ptep = (pte_t *)__pmd_page(*pmdp) +
						((vaddr >> 13) & 0x3ff);

				val = trans[i].data;

				/* Clear diag TTE bits. */
				if (tlb_type == spitfire)
					val &= ~0x0003fe0000000000UL;

				set_pte_at(&init_mm, vaddr,
					   ptep, __pte(val | _PAGE_MODIFIED));
				trans[i].data += BASE_PAGE_SIZE;
			}
		}
	}
	phys_page = __pa(prompmd);
	obp_iaddr_patch[0] |= (phys_page >> 10);
	obp_iaddr_patch[1] |= (phys_page & 0x3ff);
	flushi((long)&obp_iaddr_patch[0]);
	obp_daddr_patch[0] |= (phys_page >> 10);
	obp_daddr_patch[1] |= (phys_page & 0x3ff);
	flushi((long)&obp_daddr_patch[0]);

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");

	/* Spitfire Errata #32 workaround */
	/* NOTE: Using plain zero for the context value is
	 *       correct here, we are not using the Linux trap
	 *       tables yet so we should not use the special
	 *       UltraSPARC-III+ page size encodings yet.
	 */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	switch (tlb_type) {
	default:
	case spitfire:
		phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
		break;

	case cheetah:
	case cheetah_plus:
		phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
		break;
	};

	phys_page &= _PAGE_PADDR;
	phys_page += ((unsigned long)&prom_boot_page -
		      (unsigned long)KERNBASE);

	if (tlb_type == spitfire) {
		/* Lock this into i/d tlb entry 59 */
		__asm__ __volatile__(
			"stxa	%%g0, [%2] %3\n\t"
			"stxa	%0, [%1] %4\n\t"
			"membar	#Sync\n\t"
			"flush	%%g6\n\t"
			"stxa	%%g0, [%2] %5\n\t"
			"stxa	%0, [%1] %6\n\t"
			"membar	#Sync\n\t"
			"flush	%%g6"
			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
			    "r" (59 << 3), "r" (TLB_TAG_ACCESS),
			    "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
			    "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
			: "memory");
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Lock this into i/d tlb-0 entry 11 */
		__asm__ __volatile__(
			"stxa	%%g0, [%2] %3\n\t"
			"stxa	%0, [%1] %4\n\t"
			"membar	#Sync\n\t"
			"flush	%%g6\n\t"
			"stxa	%%g0, [%2] %5\n\t"
			"stxa	%0, [%1] %6\n\t"
			"membar	#Sync\n\t"
			"flush	%%g6"
			: : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
				 _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
			    "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
			    "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
			    "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
			: "memory");
	} else {
		/* Implement me :-) */
		BUG();
	}

	tte_vaddr = (unsigned long) KERNBASE;

	/* Spitfire Errata #32 workaround */
	/* NOTE: Using plain zero for the context value is
	 *       correct here, we are not using the Linux trap
	 *       tables yet so we should not use the special
	 *       UltraSPARC-III+ page size encodings yet.
	 */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0),
			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	if (tlb_type == spitfire)
		tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
	else
		tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());

	kern_locked_tte_data = tte_data;

	remap_func = (void *)  ((unsigned long) &prom_remap -
				(unsigned long) &prom_boot_page);

	/* Spitfire Errata #32 workaround */
	/* NOTE: Using plain zero for the context value is
	 *       correct here, we are not using the Linux trap
	 *       tables yet so we should not use the special
	 *       UltraSPARC-III+ page size encodings yet.
	 */
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "flush %%g6"
			     : /* No outputs */
			     : "r" (0),
			     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

	remap_func((tlb_type == spitfire ?
		    (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
		    (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
		   (unsigned long) KERNBASE,
		   prom_get_mmu_ihandle());

	if (bigkernel)
		remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
			   (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());

	/* Flush out that temporary mapping. */
	spitfire_flush_dtlb_nucleus_page(0x0);
	spitfire_flush_itlb_nucleus_page(0x0);

	/* Now lock us back into the TLBs via OBP. */
	prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
	prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
	if (bigkernel) {
		prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
			       tte_vaddr + 0x400000);
		prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
			       tte_vaddr + 0x400000);
	}

	/* Re-read translations property. */
	if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
		prom_printf("Couldn't get translation property\n");
		prom_halt();
	}
	n = n / sizeof(*trans);

	for (i = 0; i < n; i++) {
		unsigned long vaddr = trans[i].virt;
		unsigned long size = trans[i].size;

		if (vaddr < 0xf0000000UL) {
			unsigned long avoid_start = (unsigned long) KERNBASE;
			unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);

			if (bigkernel)
				avoid_end += (4 * 1024 * 1024);
			if (vaddr < avoid_start) {
				unsigned long top = vaddr + size;

				if (top > avoid_start)
					top = avoid_start;
				prom_unmap(top - vaddr, vaddr);
			}
			if ((vaddr + size) > avoid_end) {
				unsigned long bottom = vaddr;

				if (bottom < avoid_end)
					bottom = avoid_end;
				prom_unmap((vaddr + size) - bottom, bottom);
			}
		}
	}

	prom_printf("done.\n");

	register_prom_callbacks();
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	if (tlb_type == spitfire) {
		for (i = 0; i < 63; i++) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			tag = spitfire_get_dtlb_tag(i);
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);
			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}

static int prom_ditlb_set;
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
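
/* Switch between the Linux and PROM views of the MMU.  On entry to the
 * PROM, the nucleus VPTE mappings are flushed and the PROM's saved
 * locked TLB entries are reinstalled; on exit they are evicted again.
 * Interrupts are disabled (via PSTATE_IE) around the TLB surgery.
 */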
void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					    "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						         "r" (TLB_TAG_ACCESS),
						         "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						         "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecoverable rule on the PROM: it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}
/* Give PROM back its world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				    "i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					         "r" (TLB_TAG_ACCESS),
					         "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}

#ifdef DCACHE_ALIASING_POSSIBLE
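/* Spitfire's direct-mapped D-cache (512 lines of 32 bytes) is cleaned
 * by writing zero tags indexed by virtual address; Cheetah variants
 * invalidate lines by physical address through ASI_DCACHE_INVALIDATE.
 */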
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) and never use a CTX
 * version of zero (otherwise NO_CONTEXT would not be caught
 * by the version mismatch tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;

			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}

#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the latter address range, accesses with the former address
 * range will see the newly initialized data rather than the garbage.
 */
#ifdef DCACHE_ALIASING_POSSIBLE
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}
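
/* Debugging aids: dump the raw tag/data pairs of every ITLB and DTLB
 * entry to the console, one format per TLB organization.
 */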
void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}

extern unsigned long cmdline_memory_size;
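
/* Scan sp_banks[] (clipping to any "mem=" command line limit), place
 * the bootmem bitmap just past the kernel image (skipping over the
 * initrd if one was loaded there), then register the available
 * physical memory and reserve the kernel, the initrd, and the bootmem
 * map itself.
 */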
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that the bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	bootmap_base = bootmap_pfn << PAGE_SHIFT;

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}
/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;

void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	extern unsigned int sparc64_vpte_patchme1[1];
	extern unsigned int sparc64_vpte_patchme2[1];
	unsigned long alias_base = kern_base + PAGE_OFFSET;
	unsigned long second_alias_page = 0;
	unsigned long pt, flags, end_pfn, pages_avail;
	unsigned long shift = alias_base - ((unsigned long)KERNBASE);
	unsigned long real_end;

	set_bit(0, mmu_context_bmap);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64)
		real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
#endif

	/* We assume physical memory starts at some 4mb multiple,
	 * if this were not true we wouldn't boot up to this point
	 * anyways.
	 */
	pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
	pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
	local_irq_save(flags);
	if (tlb_type == spitfire) {
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
			: "memory");
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		__asm__ __volatile__(
	"	stxa	%1, [%0] %3\n"
	"	stxa	%2, [%5] %4\n"
	"	membar	#Sync\n"
	"	flush	%%g6\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
		: /* No outputs */
		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
		: "memory");
		if (real_end >= KERNBASE + 0x340000) {
			second_alias_page = alias_base + 0x400000;
			__asm__ __volatile__(
		"	stxa	%1, [%0] %3\n"
		"	stxa	%2, [%5] %4\n"
		"	membar	#Sync\n"
		"	flush	%%g6\n"
		"	nop\n"
		"	nop\n"
		"	nop\n"
			: /* No outputs */
			: "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
			  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
			: "memory");
		}
	}
	local_irq_restore(flags);

	/* Now set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_pmd_dir + (shift / sizeof(pgd_t)));

	sparc64_vpte_patchme1[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
	sparc64_vpte_patchme2[0] |=
		(((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
	flushi((long)&sparc64_vpte_patchme1[0]);

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	/* We only created DTLB mapping of this stuff. */
	spitfire_flush_dtlb_nucleus_page(alias_base);
	if (second_alias_page)
		spitfire_flush_dtlb_nucleus_page(second_alias_page);

	__flush_tlb_all();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}

/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
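/* Sort a PROM p1275 memory list in place by ascending start address. */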
static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}
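
/* Re-read the "/memory" node's "available" property and rebuild
 * sp_banks[] from it, trimming each bank to a page boundary.
 */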
void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
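
/* Compare the freshly rescanned sp_banks[] against the saved copy:
 * 4MB chunks that are still available get marked in
 * sparc64_valid_addr_bitmap, while pages the firmware has claimed
 * since bootmem_init are re-reserved.
 */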
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
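
/* Final memory accounting: build sparc64_valid_addr_bitmap, hand the
 * remaining bootmem pages to the page allocator, set up the reserved
 * zero page, and print the memory banner.
 */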
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *)
		__alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem (void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif