mem.c

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif
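/*
 * Return 1 if the page frame lies in system RAM: on 32-bit we compare
 * against high_memory, on 64-bit we walk the LMB memory regions.
 */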
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
			(paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
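/*
 * Pick page protections for a mapping of physical memory: non-RAM pages
 * are mapped guarded and cache-inhibited unless the platform provides
 * its own phys_mem_access_prot() hook.
 */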
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
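/*
 * Hot-add a memory range: extend the kernel linear mapping to cover it,
 * then hand the new pages to the first zone of the target node.
 */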
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
	if (ret)
		goto out;
	/* Arch-specific calls go here - next patch */
out:
	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and call the callback for contiguous regions.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
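/*
 * Print a summary of page usage: total, highmem, reserved, shared and
 * swap-cached pages, walked per online node.
 */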
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	phys_addr_t top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (u64)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
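/*
 * Release the bootmem allocator's pages to the buddy allocator, count
 * reserved pages, free highmem pages that are not LMB-reserved, and
 * print the final memory summary.
 */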
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
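/*
 * Write a page's data cache back to memory and invalidate the matching
 * icache lines; Book-E uses kmap_atomic() since the page may be in highmem.
 */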
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
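/*
 * Make the icache coherent for a user range the kernel has just written;
 * the page is mapped temporarily with kmap() so the flush can use a
 * kernel virtual address.
 */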
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif

	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as a write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question.  To work around that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		/* The _PAGE_USER test should really be _PAGE_EXEC, but
		 * older glibc versions execute some code from no-exec
		 * pages, which for now we are supporting.  If exec-only
		 * pages are ever implemented, this will have to change.
		 */
		if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}