mem.c

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

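/*
 * Return 1 if the given pfn is backed by system RAM: a simple bound
 * check against high_memory on 32-bit, a walk of the LMB memory
 * regions on 64-bit.
 */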
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);
#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

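/*
 * Page protection for /dev/mem style physical mappings: let the
 * platform override if it wants to, otherwise map non-RAM as guarded
 * and non-cacheable.
 */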
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

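/*
 * Hot-add a memory section: create the kernel mapping for the new
 * range and hand the pages to the first zone of the target node.
 */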
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  On PPC64, since this range comes from /sysfs, the range
 * is guaranteed to be valid, non-overlapping and cannot contain any
 * holes.  By the time we get here (memory add or remove), /proc/device-tree
 * is updated and correct.  The only reason we would need to check against
 * the device tree is if user-land were allowed to specify a memory range
 * through a system call/ioctl etc. instead of doing offline/online through
 * /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                     int (*func)(unsigned long, unsigned long, void *))
{
        return (*func)(start_pfn, nr_pages, arg);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

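/*
 * Print a summary of memory usage: total, highmem, reserved, shared
 * and swap-cached page counts across all online nodes.
 */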
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                        lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);
#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
        map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
        kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
                        KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

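/*
 * Release the bootmem allocator's pages to the page allocator, count
 * the pages that remain reserved, free highmem pages if any, and
 * print the usual boot-time memory summary.
 */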
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               bsssize >> 10,
               initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

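/*
 * Flush the data cache and invalidate the instruction cache for one
 * page.  Book-E kmaps the page (it may be in highmem); 8xx and 64-bit
 * use the direct mapping; other 32-bit platforms flush by physical
 * address.
 */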
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address.  Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

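/*
 * Flush the instruction cache for the part of a user page that the
 * kernel has just written to.  kmap() handles pages that may be in
 * highmem.
 */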
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as a write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question.  To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}