init_64.c

/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

int after_bootmem;

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}
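
/*
 * Note: the two helpers below install a single kernel pte at a given
 * virtual address, allocating any missing pmd/pte pages with spp_getpage().
 * This is the slow path used for one-off mappings (e.g. the fixmap
 * entries); the top-level pgd entry is expected to exist already, set up
 * by head.S in the fixmap case.
 */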
void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) && pte_val(new_pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void
set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t *)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
						pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end. _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
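
/*
 * Note: level2_kernel_pgt holds PTRS_PER_PMD (512) entries, so the loop
 * above walks the whole 1 GB kernel image mapping starting at
 * __START_KERNEL_map in 2 MB steps and clears every pmd that falls
 * outside the _text .. _end region (with _end rounded up to 2 MB).
 */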
static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static __ref void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __ref void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}
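
/*
 * Note: before bootmem is up, alloc_low_page() hands out pages from the
 * [table_start, table_top) window that find_early_table_space() reserved
 * in the e820 map.  Those pages may lie outside the direct mapping that
 * exists at that point, so they are accessed through a temporary
 * early_ioremap() and released again with unmap_low_page().  Once
 * after_bootmem is set, ordinary GFP_ATOMIC allocations are used and the
 * unmap is a no-op.
 */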
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		if (pte_val(*pte))
			continue;

		if (0)
			printk(" pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
		pages++;
	}
	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	return phys_pte_init(pte, address, end);
}
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	unsigned long start = address;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				last_map_addr = phys_pte_update(pmd, address,
								end);
				spin_unlock(&init_mm.page_table_lock);
			}
			/* Count entries we're using from level2_ident_pgt */
			if (start == 0)
				pages++;
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end);
		unmap_low_page(pte);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
		unsigned long page_size_mask)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;

	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
	__flush_tlb_all();
	return last_map_addr;
}
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				last_map_addr = phys_pmd_update(pud, addr, end,
							 page_size_mask);
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
		unmap_low_page(pmd);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
		unsigned long page_size_mask)
{
	pud_t *pud;

	pud = (pud_t *)pgd_page_vaddr(*pgd);

	return phys_pud_init(pud, addr, end, page_size_mask);
}
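
/*
 * Note: the phys_{pte,pmd,pud}_init() helpers above each fill in one
 * level of the direct mapping for the physical range [addr, end), using
 * 1G, 2M or 4K pages as permitted by page_size_mask, and return the last
 * physical address actually mapped.  The *_update() variants re-walk a
 * table that is already present instead of allocating a new one.
 */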
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (direct_gbpages) {
		unsigned long extra;
		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (cpu_has_pse) {
		unsigned long extra;
		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}
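
/*
 * Sizing illustration (hypothetical numbers): with end = 4 GB, no
 * gbpages and PSE available, the function above reserves
 *   puds = 4    -> round_up(4 * 8, 4K)    =  4 KB
 *   pmds = 2048 -> round_up(2048 * 8, 4K) = 16 KB
 *   ptes = 0    (end is 2 MB aligned)     =  0 KB
 * i.e. about 20 KB of early page-table space found below 'end'.
 */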
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
static unsigned long __init kernel_physical_mapping_init(unsigned long start,
						unsigned long end,
						unsigned long page_size_mask)
{
	unsigned long next, last_map_addr = end;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			last_map_addr = phys_pud_update(pgd, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, __va(pud_phys));
		spin_unlock(&init_mm.page_table_lock);
	}

	return last_map_addr;
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#define NR_RANGE_MR 5

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long last_map_addr = 0;
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		init_gbpages();

	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head if not big page alignment ? */
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* big page (2M) range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
			<< (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* big page (1G) range */
	start_pfn = end_pfn;
	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

	/* tail is not big page (1G) alignment */
	start_pfn = end_pfn;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* tail is not big page (2M) alignment */
	start_pfn = end_pfn;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
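
	/*
	 * Illustration (hypothetical numbers): mapping start = 0 and
	 * end = 0x80200000 (2050 MB) on a CPU with gbpages and PSE splits
	 * into two ranges: [0, 2 GB) mapped with 1 GB pages and
	 * [2 GB, 2050 MB) mapped with 2 MB pages; the remaining ranges are
	 * empty because both boundaries are already suitably aligned.
	 */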
	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof (struct map_range));
		mr[i].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	if (!after_bootmem)
		find_early_table_space(end);

	for (i = 0; i < nr_range; i++)
		last_map_addr = kernel_physical_mapping_init(
					mr[i].start, mr[i].end,
					mr[i].page_size_mask);

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem && table_end > table_start)
		reserve_early(table_start << PAGE_SHIFT,
				table_end << PAGE_SHIFT, "PGTABLE");

	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
			last_map_addr, end);

	if (!after_bootmem)
		early_memtest(start, end);

	return last_map_addr >> PAGE_SHIFT;
}
#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 0, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
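
/*
 * Zone layout note: on x86-64 MAX_DMA_PFN corresponds to the first 16 MB
 * (ZONE_DMA, for legacy ISA-style DMA) and MAX_DMA32_PFN to the first
 * 4 GB (ZONE_DMA32, for 32-bit-capable devices); everything above that
 * lands in ZONE_NORMAL.
 */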
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	memory_present(0, 0, max_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif
/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size-1);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON(1);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
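/* pagenr is compared against 256 below: 256 * 4 KB == the first megabyte. */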
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already clear the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = max_pfn - totalram_pages -
					absent_pages_in_range(0, max_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	start = rodata_start;
#endif

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
#ifdef CONFIG_NUMA
	int nid, next_nid;
	int ret;
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= max_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return -EFAULT;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
				phys, len);
		return -EFAULT;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	nid = phys_to_nid(phys);
	next_nid = phys_to_nid(phys + len - 1);
	if (nid == next_nid)
		ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
	else
		ret = reserve_bootmem(phys, len, flags);

	if (ret != 0)
		return ret;
#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}

	return 0;
}
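
/*
 * kern_addr_valid() first checks that the address is canonical: the bits
 * above __VIRTUAL_MASK_SHIFT must be either all zeroes or all ones, which
 * is what the arithmetic right shift into 'above' tests for.  Only then
 * are the kernel page tables walked down to the pte (or large pmd).
 */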
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
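
/*
 * Note: each 2 MB PMD mapping of the vmemmap provides backing store for
 * PMD_SIZE / sizeof(struct page) page descriptors - e.g. roughly 32768
 * of them, covering about 128 MB of RAM, assuming a 64-byte struct page.
 */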
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);
			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);
			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
		       addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif