/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *   changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
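
/*
 * The pfnnid_map[] above maps PFNNID_SHIFT-sized chunks of the physical
 * address space to node ids; setup_bootmem() presets every entry to -1
 * (0xff) and paging_init() fills in the entries each range covers.
 */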

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM		(~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;
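
/*
 * Scan the raw command line for "mem=".  memparse() accepts the usual
 * K/M/G suffixes, so e.g. "mem=512M" clamps mem_limit to 512 MB.
 */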
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */
	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
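
/* 0x40000000 bytes is 1 GB; shifted right by PAGE_SHIFT this is the
 * widest hole, in page frames, tolerated between two memory ranges
 * when CONFIG_DISCONTIGMEM is off. */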
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {
		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
			BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}
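
/*
 * The head.S entry page holding parisc_kernel_start is presumably not
 * covered by core_kernel_text(), so treat that page as kernel text as
 * well; map_pages() uses this test to pick PAGE_KERNEL_EXEC.
 */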
static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}
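
/*
 * Build kernel mappings for [start_vaddr, start_vaddr + size), backed
 * by physical memory at start_paddr, allocating pmd/pte pages from
 * bootmem as needed.  Unless 'force' is set, kernel text is mapped
 * PAGE_KERNEL_EXEC and (on 4kB-page kernels) the _text..data_start
 * region PAGE_KERNEL_RO; the fault vector and the gateway page are
 * excluded from those cases.
 */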
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr) &&
					 address != fv_addr)
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(-1);

	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
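
/* SET_MAP_OFFSET() always advances to the next 32 kB boundary, e.g.
 * 0x12340000 -> 0x12348000 and 0x12340001 -> 0x12348000, so at most
 * VM_MAP_OFFSET of slack is left as the guard hole described above. */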

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",
	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,
	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}
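
/* The global zero page returned by ZERO_PAGE(); allocated and cleared
 * in pagetable_init() below. */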
unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(unsigned int filter)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;

			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
					       zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif
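
/*
 * Boot-time ordering: set up bootmem, build the kernel page tables and
 * the gateway page, flush caches/TLB to a known state, then hand every
 * memory range to free_area_init_node() with all pages in ZONE_NORMAL.
 */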
void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
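
/* One bit per space ID: each bitmap below occupies NR_SPACE_IDS / 8
 * bytes, i.e. 4 kB for 32768 IDs or 32 kB for 262144. */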
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
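
/*
 * Freeing only marks the ID dirty; it becomes allocatable again once
 * flush_tlb_all() has purged stale translations and recycle_sids()
 * has folded the dirty bits back into space_id[].
 */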
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */
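
/*
 * The SMP variant recycles in two phases: snapshot and clear the dirty
 * bitmap under sid_lock, drop the lock while every CPU purges its TLB,
 * then retake it and fold the snapshot back into space_id[].
 */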
#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif