/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
extern void device_scan(void);

struct sparc_phys_banks {
	unsigned long base_addr;
	unsigned long num_bytes;
};

#define SPARC_PHYS_BANKS 32

static struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

int bigkernel = 0;

/* XXX Tune this... */
#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
	preempt_enable();
}
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif
#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
			   PG_dcache_cpu_mask);
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}
}
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n",pgtable_cache_size);
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};
static struct linux_prom_translation prom_trans[512] __initdata;

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* Exported for kernel TLB miss handling in ktlb.S */
unsigned long prom_pmd_phys __read_mostly;
unsigned int swapper_pgd_zero __read_mostly;

/* Allocate power-of-2 aligned chunks from the end of the
 * kernel image.  Return physical address.
 */
static inline unsigned long early_alloc_phys(unsigned long size)
{
	unsigned long base;

	BUILD_BUG_ON(size & (size - 1));

	kern_size = (kern_size + (size - 1)) & ~(size - 1);
	base = kern_base + kern_size;
	kern_size += size;

	return base;
}

static inline unsigned long load_phys32(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("lduwa [%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline unsigned long load_phys64(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline void store_phys32(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stwa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}

static inline void store_phys64(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}
#define BASE_PAGE_SIZE 8192

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	unsigned long pmd_phys = (prom_pmd_phys +
				  ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
	unsigned long pte_phys;
	pmd_t pmd_ent;
	pte_t pte_ent;
	unsigned long base;

	pmd_val(pmd_ent) = load_phys32(pmd_phys);
	if (pmd_none(pmd_ent)) {
		if (error)
			*error = 1;
		return 0;
	}
	pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
	pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
	pte_val(pte_ent) = load_phys64(pte_phys);
	if (!pte_present(pte_ent)) {
		if (error)
			*error = 1;
		return 0;
	}
	if (error) {
		*error = 0;
		return pte_val(pte_ent);
	}
	base = pte_val(pte_ent) & _PAGE_PADDR;
	return (base + (promva & (BASE_PAGE_SIZE - 1)));
}
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
 * scheme (also, see rant in inherit_locked_prom_mappings()).
 */
static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
{
	unsigned long vaddr;

	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
		unsigned long val, pte_phys, pmd_phys;
		pmd_t pmd_ent;
		int i;

		pmd_phys = (prom_pmd_phys +
			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
		pmd_val(pmd_ent) = load_phys32(pmd_phys);
		if (pmd_none(pmd_ent)) {
			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);

			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
				store_phys64(pte_phys+i*sizeof(pte_t),0);

			pmd_val(pmd_ent) = pte_phys >> 11UL;
			store_phys32(pmd_phys, pmd_val(pmd_ent));
		}

		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));

		val = data;

		/* Clear diag TTE bits. */
		if (tlb_type == spitfire)
			val &= ~0x0003fe0000000000UL;

		store_phys64(pte_phys, val | _PAGE_MODIFIED);

		data += BASE_PAGE_SIZE;
	}
}

static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

#define OBP_PMD_SIZE 2048
static void __init build_obp_pgtable(int prom_trans_ents)
{
	unsigned long i;

	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
	for (i = 0; i < OBP_PMD_SIZE; i += 4)
		store_phys32(prom_pmd_phys + i, 0);

	for (i = 0; i < prom_trans_ents; i++) {
		unsigned long start, end;

		if (!in_obp_range(prom_trans[i].virt))
			continue;

		start = prom_trans[i].virt;
		end = start + prom_trans[i].size;
		if (end > HI_OBP_ADDRESS)
			end = HI_OBP_ADDRESS;

		build_obp_range(start, end, prom_trans[i].data);
	}
}
/* Read OBP translations property into 'prom_trans[]'.
 * Return the number of entries.
 */
static int __init read_obp_translations(void)
{
	int n, node;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}
	n = n / sizeof(struct linux_prom_translation);
	return n;
}
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
				 _PAGE_CP | _PAGE_CV | _PAGE_P |
				 _PAGE_L | _PAGE_W));

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via OBP. */
	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
	if (bigkernel) {
		prom_dtlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
		prom_itlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
	}
}

static void __init inherit_prom_mappings(void)
{
	int n;

	n = read_obp_translations();
	build_obp_pgtable(n);

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();

	prom_printf("done.\n");

	register_prom_callbacks();
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	if (tlb_type == spitfire) {
		for (i = 0; i < 63; i++) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_dtlb_tag(i);
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);
			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}
static int prom_ditlb_set;
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];

void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
						       "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						       "r" (TLB_TAG_ACCESS),
						       "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						       "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecoverable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "flush %%g6"
						     : /* No outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}
/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					       "i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					       "r" (TLB_TAG_ACCESS),
					       "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}
#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}
#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the later address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#ifdef DCACHE_ALIASING_POSSIBLE
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}
void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}
void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}
extern unsigned long cmdline_memory_size;

unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern struct linux_mlist_p1275 *prom_ptot_ptr;
extern unsigned int kvmap_linear_patch[1];

static void __init kernel_physical_mapping_init(void)
{
	struct linux_mlist_p1275 *p = prom_ptot_ptr;
	unsigned long mem_alloced = 0UL;

	while (p) {
		unsigned long phys_start, phys_end;

		phys_start = p->start_adr;
		phys_end = phys_start + p->num_bytes;
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);

		p = p->theres_more;
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	unsigned long i;

	for (i = 0; ; i++) {
		if (sp_banks[i].num_bytes == 0)
			break;
		if (sp_banks[i].num_bytes >= size)
			return sp_banks[i].base_addr;
	}

	return ~0UL;
}
static void __init prom_probe_memory(void)
{
	struct linux_mlist_p1275 *mlist;
	unsigned long bytes, base_paddr, tally;
	int i;

	i = 0;
	mlist = *prom_meminfo()->p1275_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end, i;

	prom_probe_memory();

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long top;

		if (sp_banks[i].base_addr < phys_base)
			phys_base = sp_banks[i].base_addr;
		top = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
	}
	pfn_base = phys_base >> PAGE_SHIFT;

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	__flush_tlb_all();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kernel_physical_mapping_init();
#endif

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}
/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;

		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}
void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif