init_32.c

/*
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

static __init void *alloc_low_page(void)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

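/*
 * alloc_low_page() is a simple bump allocator over the physically
 * contiguous window [table_start, table_top) that
 * find_early_table_space() reserves from e820 before bootmem is up.
 * Each call hands out one zeroed page and advances table_end; there is
 * deliberately no free path, since these pages become live page tables.
 */
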
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_init_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

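/*
 * Note that with !CONFIG_X86_PAE the pmd level is folded into the pgd:
 * pud_offset()/pmd_offset() above are no-ops that merely cast the pgd
 * entry, so the function really does return the pgd slot. Only PAE
 * allocates a separate 4K pmd page (512 eight-byte entries).
 */
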
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);

	return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_init_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

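/*
 * The replacement above keeps the kmap fixmap area's pte pages inside
 * the contiguous [table_start, table_end) window: if something (the
 * early fixmap) already installed a pte page from elsewhere, its
 * contents are copied into a freshly bump-allocated page and the pmd
 * is repointed at the copy. The closing BUG_ON then asserts that,
 * within the kmap range, successive pte pages really are physically
 * consecutive (lastpte + PTRS_PER_PTE == pte).
 */
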
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
						unsigned long start_pfn,
						unsigned long end_pfn,
						int use_pse)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S.
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
}

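/*
 * Sizing note: one pmd entry mapped as a large page covers PTRS_PER_PTE
 * small pages, i.e. 1024 x 4KB = 4MB without PAE or 512 x 4KB = 2MB
 * with PAE (hence the pages_2m naming). The addr2 computation above
 * probes the last byte of that span, so a large page overlapping
 * kernel text at either end is mapped executable.
 */
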
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

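/*
 * Arithmetic note: "pagenr <= 256" whitelists pfns 0..256, i.e. the
 * first megabyte of RAM (256 x 4KB = 1MB) plus the first page above
 * it - the legacy BIOS/VGA region described above.
 */
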
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);

	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

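/*
 * Under PAE the low pgd slots are PDPTEs, which the processor loads
 * into internal registers when %cr3 is written, so they are not simply
 * zeroed; instead each is pointed at the all-zero empty_zero_page with
 * only the Present bit set (the literal 1 above). Without PAE a plain
 * zero entry is fine. Either way, the boot-time identity mapping of
 * low memory set up by head_32.S is gone after this.
 */
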
int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

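/*
 * set_nx() probes CPUID leaf 0x80000001: EDX bit 20 advertises the NX
 * (execute-disable) feature. When present and not disabled via
 * "noexec=off", EFER.NX is enabled through rdmsr/wrmsr(MSR_EFER) and
 * _PAGE_NX becomes usable in ptes - which is why all of this lives
 * under CONFIG_X86_PAE: only PAE's 64-bit entries have room for the
 * NX bit.
 */
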
/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;

	return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		if (highmem_pages == -1)
			highmem_pages = max_pfn - MAXMEM_PFN;
		if (highmem_pages + MAXMEM_PFN < max_pfn)
			max_pfn = MAXMEM_PFN + highmem_pages;
		if (highmem_pages + MAXMEM_PFN > max_pfn) {
			printk(KERN_WARNING "only %luMB highmem pages "
				"available, ignoring highmem size of %uMB.\n",
				pages_to_mb(max_pfn - MAXMEM_PFN),
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
					MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING
				"Use a HIGHMEM64G enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
		max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning only 4GB will be used. "
				"Use a HIGHMEM64G enabled kernel.\n");
		}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
	} else {
		if (highmem_pages == -1)
			highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is "
				"bigger than pages available (%luMB)!\n",
				pages_to_mb(highmem_pages),
				pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn - highmem_pages <
			    64*1024*1024/PAGE_SIZE) {
				printk(KERN_ERR "highmem size %uMB results in "
					"smaller than 64MB lowmem, ignoring it.\n",
					pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem"
					" kernel!\n");
#endif
	}
}

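/*
 * Sizing note: MAXMEM_PFN is the highest pfn the kernel can map
 * directly; everything above it must be highmem. On a typical 32-bit
 * configuration (3G/1G split, the default 128MB __VMALLOC_RESERVE set
 * at the top of this file) this works out to the classic 896MB lowmem
 * ceiling.
 */
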
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
			 unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

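/*
 * Zone layout on 32-bit: ZONE_DMA covers ISA-DMA'able memory below
 * MAX_DMA_ADDRESS (the first 16MB), ZONE_NORMAL runs from there up to
 * max_low_pfn, and with CONFIG_HIGHMEM everything beyond lowmem up to
 * highend_pfn lands in ZONE_HIGHMEM.
 */
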
void __init setup_bootmem_allocator(void)
{
	int i;
	unsigned long bootmap_size, bootmap;

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: %08lx - %08lx\n",
		 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  bootmap %08lx - %08lx\n",
		 bootmap, bootmap + bootmap_size);
	for_each_online_node(i)
		free_bootmem_with_active_regions(i, max_low_pfn);
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

	after_init_bootmem = 1;
}

static void __init find_early_table_space(unsigned long end, int use_pse)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = PAGE_ALIGN(puds * sizeof(pud_t));

	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		extra += PMD_SIZE;
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += PAGE_ALIGN(ptes * sizeof(pte_t));

	/* for fixmap */
	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x7000;
	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables>>PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

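/*
 * Worked example (assuming non-PAE with PSE available and end = 1GB):
 * with the pud and pmd levels folded, puds = pmds = 256 four-byte
 * entries, each table rounding up to one 4K page; PSE leaves only one
 * PMD_SIZE (4MB) worth of slop mapped with 4K ptes, i.e. 1024 entries
 * = one more page, plus one page budgeted for the fixmap ptes - about
 * 16KB of early table space in total.
 */
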
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long start_pfn, end_pfn;
	unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	int use_pse = 0;
#else
	int use_pse = cpu_has_pse;
#endif

	/*
	 * Find space for the kernel direct mapping tables.
	 */
	if (!after_init_bootmem)
		find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	big_page_start = PMD_SIZE;

	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
	} else {
		/* head is not big-page aligned */
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

	/* big page range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
					     use_pse);

	/* tail is not big-page aligned */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			kernel_physical_mapping_init(pgd_base, start_pfn,
						     end_pfn, 0);
	}

	early_ioremap_page_table_range_init(pgd_base);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	if (!after_init_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_init_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
}

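/*
 * The mapping is thus built in up to three pieces: a 4K-mapped head
 * below big_page_start (avoiding large pages over the MTRR-riddled
 * first 2/4MB), a large-page middle aligned to PMD_SIZE, and a
 * 4K-mapped tail for any remainder that is not big-page aligned.
 */
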
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

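/*
 * How the test works: FIX_WP_TEST was mapped read-only by test_wp_bit().
 * The asm reads a byte from it, then writes it back at label 1 and
 * clears "flag" (initialized to 1 by the "2" (1) input constraint) only
 * if the write goes through. If the CPU honours WP in supervisor mode,
 * the store faults and the _ASM_EXTABLE fixup resumes at label 2 with
 * flag still 1.
 */
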
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}