/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages __meminitdata
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);
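
/*
 * Both hooks override the CONFIG_DIRECT_GBPAGES default above from the
 * kernel command line: "nogbpages" forces the direct mapping down to
 * 2MB pages, "gbpages" asks for 1GB pages.  Either way the request is
 * only honoured if the CPU supports 1GB pages (see init_gbpages()).
 */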

/*
 * NOTE: pagetable_init allocs all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and
 * move around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/*
			 * This loop can take a while with 256 GB and
			 * 4k pages so defer the NMI watchdog:
			 */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();

			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;

			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	/* the counters are signed long, so print with %ld rather than %lu */
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}
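
/*
 * Walk the kernel page tables for 'vaddr' down pgd -> pud -> pmd -> pte,
 * allocating missing intermediate tables via spp_getpage() above, and
 * install a pte mapping 'phys' with protection 'prot'.  The top-level
 * pgd entry must already exist; head.S sets it up for the fixmap range.
 */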
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (!pmd_present(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk(KERN_ERR "Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
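
/*
 * Typical use, as a sketch only (the real call sites live in other files
 * and vary by kernel version): the set_fixmap()/set_fixmap_nocache()
 * wrappers from <asm/fixmap.h> expand to __set_fixmap(), e.g. the local
 * APIC setup doing roughly
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 *
 * after which the APIC registers are visible at fix_to_virt(FIX_APIC_BASE).
 */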

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}
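
/*
 * The pair above solves a bootstrapping problem: while the direct mapping
 * is still being built, freshly allocated page-table pages (taken from the
 * [table_start, table_end) window found by find_early_table_space() below)
 * are not yet mapped anywhere.  Each one is therefore mapped through
 * early_ioremap() just long enough to be filled in, then unmapped again.
 */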

/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
	pmd_t *pmd, *last_pmd;
	unsigned long vaddr;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto continue_outer_loop;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;

		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		__flush_tlb_all();

		return (void *)vaddr;
continue_outer_loop:
		;
	}
	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

	return NULL;
}
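
/*
 * The 'pmds' expression above is a round-up that also covers mappings
 * straddling a 2MB boundary.  Worked example with PMD_SIZE = 2MB:
 * addr = 0x1ff000, size = 0x2000 gives (addr & ~PMD_MASK) = 0x1ff000,
 * so pmds = (0x1ff000 + 0x2000 + 0x1fffff) / 0x200000 = 2: two large
 * pages are needed even though only 8KB was requested.
 */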

/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);

	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);

	__flush_tlb_all();
}

static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd))
			continue;

		set_pte((pte_t *)pmd,
			pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
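
/*
 * phys_pmd_update() is the path taken when the pud entry already points
 * at a pmd page (the memory-hotplug case, after boot): the existing pmd
 * page is extended in place under init_mm's page_table_lock, since other
 * CPUs may be running by then.  During boot, phys_pud_init() below builds
 * fresh pmd pages instead.
 */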

static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				phys_pmd_update(pud, addr, end);
			continue;
		}

		if (direct_gbpages) {
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);

		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);

		unmap_low_page(pmd);
	}
	__flush_tlb_all();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (!direct_gbpages) {
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
		tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
	}

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
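
/*
 * Worked example of the sizing above, for end = 64GB without gbpages:
 * puds = 64GB / 1GB = 64 entries, rounded up to one 4KB page;
 * pmds = 64GB / 2MB = 32768 entries * 8 bytes = 256KB of pmd pages.
 * With direct_gbpages only the pud page is needed.
 */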

static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}

#ifdef CONFIG_MEMTEST_BOOTPARAM

static void __init memtest(unsigned long start_phys, unsigned long size,
				 unsigned pattern)
{
	unsigned long i;
	unsigned long *start;
	unsigned long start_bad;
	unsigned long last_bad;
	unsigned long val;
	unsigned long start_phys_aligned;
	unsigned long count;
	unsigned long incr;

	switch (pattern) {
	case 0:
		val = 0UL;
		break;
	case 1:
		val = -1UL;
		break;
	case 2:
		val = 0x5555555555555555UL;
		break;
	case 3:
		val = 0xaaaaaaaaaaaaaaaaUL;
		break;
	default:
		return;
	}

	incr = sizeof(unsigned long);
	start_phys_aligned = ALIGN(start_phys, incr);
	count = (size - (start_phys_aligned - start_phys))/incr;
	start = __va(start_phys_aligned);
	start_bad = 0;
	last_bad = 0;

	for (i = 0; i < count; i++)
		start[i] = val;
	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
		if (*start != val) {
			if (start_phys_aligned == last_bad + incr) {
				last_bad += incr;
			} else {
				if (start_bad) {
					printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
						val, start_bad, last_bad + incr);
					/* reserve_early() takes an end address, not a size */
					reserve_early(start_bad, last_bad + incr, "BAD RAM");
				}
				start_bad = last_bad = start_phys_aligned;
			}
		}
	}
	if (start_bad) {
		printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
			val, start_bad, last_bad + incr);
		/* reserve_early() takes an end address, not a size */
		reserve_early(start_bad, last_bad + incr, "BAD RAM");
	}
}

static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;

static int __init parse_memtest(char *arg)
{
	if (arg)
		memtest_pattern = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("memtest", parse_memtest);

static void __init early_memtest(unsigned long start, unsigned long end)
{
	unsigned long t_start, t_size;
	unsigned pattern;

	if (!memtest_pattern)
		return;

	printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
	for (pattern = 0; pattern < memtest_pattern; pattern++) {
		t_start = start;
		t_size = 0;
		while (t_start < end) {
			t_start = find_e820_area_size(t_start, &t_size, 1);

			/* done ? */
			if (t_start >= end)
				break;
			if (t_start + t_size > end)
				t_size = end - t_start;

			printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
				t_start, t_start + t_size, pattern);

			memtest(t_start, t_size, pattern);

			t_start += t_size;
		}
	}
	printk(KERN_CONT "\n");
}
#else
static void __init early_memtest(unsigned long start, unsigned long end)
{
}
#endif
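
/*
 * With CONFIG_MEMTEST_BOOTPARAM enabled, booting with e.g. "memtest=4"
 * walks every free e820 range once per pattern (all zeroes, all ones,
 * 0x55.., 0xaa..) and reserves any words that read back wrong as
 * "BAD RAM".  "memtest=0" disables the scan.
 */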

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long start_phys = start, end_phys = end;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem) {
		init_gbpages();
		find_early_table_space(end);
	}

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
			table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start_phys, end_phys);
}
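
/*
 * Note how the loop above chunks the work by pgd entry: each iteration
 * covers up to PGDIR_SIZE (512GB with 4-level paging) of address space,
 * builds the pud/pmd hierarchy for it, and only then hooks the new pud
 * page into the live pgd, so the hardware walker never sees a partially
 * built subtree during boot.
 */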

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size - 1);

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON(ret);	/* warn only if adding the pages actually failed */

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
	set_memory_nx(start, (end - start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
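
/*
 * The 'above' test is a canonical-address check: the arithmetic shift
 * replicates bit __VIRTUAL_MASK_SHIFT (48 here) into all upper bits, so
 * a canonical pointer yields 0 (user half, e.g. 0x00007fffffffffff) or
 * -1 (kernel half, e.g. 0xffff800000000000).  Anything else lies in the
 * non-canonical hole and is rejected before the table walk.
 */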

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;
			void *p;

			p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
							PAGE_KERNEL_LARGE);
			set_pmd(pmd, __pmd(pte_val(entry)));

			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
				addr, addr + PMD_SIZE - 1, p, node);
		} else {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	return 0;
}
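
/*
 * Back-of-the-envelope for the PMD-level mapping: each 2MB vmemmap block
 * holds PMD_SIZE / sizeof(struct page) entries; assuming the common
 * 64-byte struct page, that is 32768 entries, so one 2MB allocation
 * describes 32768 * 4KB = 128MB of physical memory.
 */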
#endif