mem.c

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>
#include <asm/lmb.h>
#include <asm/sections.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
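
/*
 * Set once the bootmem allocator is up, and once mem_init() has
 * completed, respectively.  Other parts of the port (ioremap and the
 * page-table allocators, in the usual arch/powerpc layout) consult
 * these to decide between the bootmem and the normal page allocator.
 */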
int init_bootmem_done;
int mem_init_done;

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);
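
/*
 * Decide which page protection to use when userspace mmaps physical
 * memory through /dev/mem: the platform hook gets first say, and any
 * address that is not RAM is mapped guarded and non-cacheable.
 */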
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
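	/*
	 * Worked example (illustrative numbers, not taken from this code):
	 * with 256MB of RAM and 4KB pages there are 65536 page frames, so
	 * the bitmap needs 65536 / 8 = 8KB, which bootmem_bootmap_pages()
	 * rounds up to two page-aligned pages.
	 */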
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));

	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#else
	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
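
/*
 * Hand all of boot memory over to the buddy allocator, count the pages
 * that remain reserved, and print the "Memory: ..." banner.  With
 * CONFIG_HIGHMEM the highmem page frames are freed by hand as well.
 */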
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC64
	/* Initialize the vDSO */
	vdso_init();
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
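
/*
 * Make one page coherent for instruction fetch: write its d-cache
 * lines back and invalidate the corresponding i-cache lines.  The page
 * may be a highmem page, hence the kmap_atomic() on Book-E.
 */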
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
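
/*
 * Zero a page that is about to be handed to userspace.  On CPUs
 * without a coherent i-cache the page is also marked not i-cache
 * clean (PG_arch_1 cleared) so that update_mmu_cache() below flushes
 * it before it can be executed from.
 */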
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
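
/*
 * Copy a page for userspace; as in clear_user_page() above, the copy
 * is marked not i-cache clean on CPUs that need the explicit flush.
 */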
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
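
/*
 * Flush the i-cache for a range of a user page that the kernel has
 * just written (e.g. ptrace poking in a breakpoint); kmap() is used
 * so that highmem pages work too.
 */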
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
#ifdef CONFIG_PPC32
	pmd_t *pmd;
#else
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;
#endif

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
				/* On 8xx, cache control instructions (particularly
				 * "dcbst" from flush_dcache_icache) fault as write
				 * operation if there is an unpopulated TLB entry
				 * for the address in question.  To workaround that,
				 * we invalidate the TLB here, thus avoiding dcbst
				 * misbehaviour.
				 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;
#ifdef CONFIG_PPC32
	if (Hash == 0)
		return;
	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
	if (!pmd_none(*pmd))
		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, address);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, address);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
#endif
#endif
}