/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap page tables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */
void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        struct page *page;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /*
                         * This loop can take a while with 256 GB and
                         * 4k pages so defer the NMI watchdog:
                         */
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();

                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;

                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

int after_bootmem;
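
/*
 * Allocate one zeroed page for early page-table use: from the bootmem
 * allocator while we are still single-threaded at boot, from the normal
 * page allocator (GFP_ATOMIC) once after_bootmem is set.
 */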
static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}
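
/*
 * Install a single kernel-space mapping of @phys at @vaddr, allocating
 * any missing intermediate page-table levels with spp_getpage().
 */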
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk(KERN_ERR "Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}
static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
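
/*
 * Hand out zeroed pages for the early page tables.  At boot the pages
 * come from the [table_start, table_end) window reserved by
 * find_early_table_space() and have to be reached via early_ioremap(),
 * because the direct mapping does not cover them yet.
 */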
static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;

        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}
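
/*
 * Map a physical range through a run of unused 2MB entries in
 * level2_kernel_pgt.  This is how alloc_low_page() reaches its pages
 * while the direct mapping is still being built.
 */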
/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
        pmd_t *pmd, *last_pmd;
        unsigned long vaddr;
        int i, pmds;

        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto continue_outer_loop;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;

                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                __flush_tlb_all();

                return (void *)vaddr;
continue_outer_loop:
                ;
        }
        printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

        return NULL;
}
/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);

        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);

        __flush_tlb_all();
}
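
/*
 * Fill one pmd page with 2MB mappings for [address, end).  At boot,
 * entries past the end of the range are explicitly cleared so no stale
 * mappings survive in a partially used pmd page.
 */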
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                set_pte((pte_t *)pmd,
                        pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
}
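
/*
 * Extend the mappings under an already-instantiated pud entry; this
 * path runs at memory-hotplug time, so it takes init_mm.page_table_lock
 * around phys_pmd_init().
 */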
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        spin_lock(&init_mm.page_table_lock);
        phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
}
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        phys_pmd_update(pud, addr, end);
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);

                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);

                unmap_low_page(pmd);
        }
        __flush_tlb_all();
}
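
/*
 * Worked example for the table sizing below: mapping end = 4GB needs
 * 4 pud entries and 2048 pmd entries, so with 8-byte entries
 * tables = round_up(4 * 8, 4096) + round_up(2048 * 8, 4096) = 20KB.
 */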
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 4KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}
/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        pr_debug("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                        table_end << PAGE_SHIFT, "PGTABLE");
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size-1);

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON(1);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        /* temporary debugging - double check it's true: */
        {
                int i;

                for (i = 0; i < 1024; i++)
                        WARN_ON_ONCE(empty_zero_page[i]);
        }

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                                        absent_pages_in_range(0, end_pfn);
        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        if (begin >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}
void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = (unsigned long)_stext, end;

#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() > 1)
                start = (unsigned long)_etext;
#endif

#ifdef CONFIG_KPROBES
        start = (unsigned long)__start_rodata;
#endif

        end = (unsigned long)__end_rodata;
        start = (start + PAGE_SIZE - 1) & PAGE_MASK;
        end &= PAGE_MASK;
        if (end <= start)
                return;

        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif
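
/*
 * Reserve a physical range in the bootmem allocator: on NUMA the range
 * is reserved on its owning node, and any part below 16MB is accounted
 * via set_dma_reserve() so the ZONE_DMA watermarks stay correct.
 */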
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= end_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < end_pfn_map)
                        return;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                                phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}
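
/*
 * Walk the kernel page tables by hand: the address must be canonical
 * (the bits above __VIRTUAL_MASK_SHIFT all 0 or all 1) and every level
 * down to the final pte, or to a large pmd, must be present and point
 * at a valid pfn.
 */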
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";

        return NULL;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        /* size is in pages, so this relies on struct page pointer arithmetic */
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        pte_t entry;
                        void *p;

                        p = vmemmap_alloc_block(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));

                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
                                addr, addr + PMD_SIZE - 1, p, node);
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        }
        return 0;
}
#endif