/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

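/*
 * Grab a zeroed page from the range reserved for early page tables
 * (e820_table_end..e820_table_top).  Only usable before the bootmem
 * allocator takes over.
 */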
static __init void *alloc_low_page(void)
{
	unsigned long pfn = e820_table_end++;
	void *adr;

	if (pfn >= e820_table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

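/*
 * Helpers for callers that need to install extra kernel mappings:
 * walk (and, if needed, allocate) the page-table levels covering
 * vaddr in swapper_pg_dir and return a pointer to the pmd/pte slot.
 */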
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

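/*
 * Make sure the pte page backing the kmap fixmap range lives in the
 * contiguous early page-table area; replace it if something (e.g. the
 * early fixmap) already installed one from elsewhere.
 */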
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a
	 *      way that would change, for any linear address, both the
	 *      page size and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a local global TLB flush, which will flush the
		 * previous mappings present in both the small and large
		 * page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
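/*
 * Set up the page tables backing the persistent kmap area (PKMAP_BASE)
 * and cache a pointer to its pte page in pkmap_page_table.
 */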
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);

	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

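/*
 * Detect the NX (no-execute) capability via CPUID leaf 0x80000001
 * (EDX bit 20) and, unless "noexec=off" was given, enable it by
 * setting the NX-enable bit in the EFER MSR.
 */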
void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */
	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
			 unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

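/*
 * Record the highest pfn of each zone (DMA, NORMAL and, if configured,
 * HIGHMEM) and hand the table to the core page allocator.
 */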
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

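/*
 * Initialize the bootmem allocator for one node's low memory and
 * return the bootmap address to be used for the next node.
 */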
static unsigned long __init setup_node_bootmem(int nodeid,
				 unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned long bootmap)
{
	unsigned long bootmap_size;

	if (start_pfn > max_low_pfn)
		return bootmap;
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap >> PAGE_SHIFT,
					 start_pfn, end_pfn);
	printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
		nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
		nodeid, bootmap, bootmap + bootmap_size);
	free_bootmem_with_active_regions(nodeid, end_pfn);
	early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	return bootmap + bootmap_size;
}

void __init setup_bootmem_allocator(void)
{
	int nodeid;
	unsigned long bootmap_size, bootmap;

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nodeid)
		bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
					node_end_pfn[nodeid], bootmap);
#else
	bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
#endif

	after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

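/*
 * mem_init() releases all bootmem pages to the buddy allocator, prints
 * the memory and virtual-layout summary, sanity-checks the address-space
 * boundaries, and finally drops the low identity mappings that were
 * only needed during boot.
 */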
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
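/*
 * Memory-hotplug entry point: hot-added memory on 32-bit x86 always
 * goes into the highmem zone of the target node.
 */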
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

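/*
 * Write-protect the kernel text (unless dynamic ftrace needs to patch
 * it) and the read-only data section, optionally exercising the CPA
 * machinery when CONFIG_CPA_DEBUG is set.
 */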
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

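/*
 * On 32-bit x86 there is nothing architecture-specific to do here;
 * simply forward to the generic bootmem reservation.
 */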
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}