/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;	/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;	/* end of BSS, defined by linker */
extern char __init_begin, __init_end;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES];
bootmem_data_t bmem_data[MAX_NUMNODES];
unsigned char pfnnid_map[PFNNID_MAP_MAX];
#endif
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES];

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
int npmem_ranges;
#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit = MAX_MEM;
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */
	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
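
/*
 * Worked example (illustrative, not part of the original source): with
 * "mem=512M" on the command line, memparse() returns 512*1024*1024 and
 * advances 'end' past the "512M" token, so end != cp, the loop breaks,
 * and mem_limit becomes 0x20000000.  A command line without "mem="
 * leaves limit at MAX_MEM and mem_limit unchanged.
 */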

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {
		/* Print the memory ranges */
		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			       i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];

		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif
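
	/*
	 * Illustrative example (not in the original source): two ranges
	 * { start_pfn = 0, pages = 0x8000 } and { start_pfn = 0xa000,
	 * pages = 0x4000 } merge into the single range { start_pfn = 0,
	 * pages = 0xe000 }, and the gap between them is recorded as the
	 * hole { start_pfn = 0x8000, pages = 0x2000 }, which is reserved
	 * in the bootmem allocator further below.
	 */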

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						 bootmap_pfn,
						 start_pfn,
						 (start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
					__pa(initrd_start),
					__pa(initrd_start) + initrd_reserve,
					mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];

		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	/* FIXME: */
#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#else
	unsigned long addr;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#if 1
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
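
/*
 * Worked example (illustrative, not part of the original source): with
 * VM_MAP_OFFSET = 0x8000, SET_MAP_OFFSET(0x12345) yields
 * (0x12345 + 0x8000) & ~0x7fff = 0x18000, i.e. the address is rounded
 * up to the next 32K boundary.  An already-aligned input such as
 * 0x10000 still becomes 0x18000, so a full 32K hole is always left
 * between adjacent mapping areas.
 */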

void *vmalloc_start;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start;
#endif

void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}
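
/*
 * Resulting layout, sketched (illustrative only; actual values depend
 * on the configuration): on a PA1.1 machine using pcxl_dma_ops,
 *
 *   MAP_START
 *     | 32K hole | pcxl DMA map (PCXL_DMA_MAP_SIZE bytes)
 *     | 32K hole | vmalloc area, starting at vmalloc_start
 *
 * otherwise vmalloc_start sits a single 32K hole above MAP_START.
 */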

int do_check_pgt_cache(int low, int high)
{
	return 0;
}

unsigned long *empty_zero_page;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:     %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;

			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ",
					       zl->zones[k]->zone_pgdat->node_id,
					       zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}

static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
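
/*
 * Index arithmetic, illustrated (values are configuration dependent and
 * given here only as an example): with 4K pages (PAGE_SHIFT = 12) and
 * PTRS_PER_PTE = 1024, a virtual address such as 0xc0801000 selects
 *
 *   pte index = (0xc0801000 >> 12) & 1023 = 1
 *
 * within its page table, and the PMD index is taken from the bits above
 * PMD_SHIFT in the same way.  The loops above simply walk these indices
 * in order, allocating intermediate tables on first touch.
 */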

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;

			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
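
/*
 * Sizing example (illustrative): on a 64-bit PA20 kernel,
 * NR_SPACE_IDS = 262144, so the space_id[] bitmap spans
 * 262144 / (8 * 8) = 4096 longs, and recycling is triggered once
 * 131072 space ids are sitting in the dirty set.
 */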

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}
	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
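
/*
 * Bitmap arithmetic, illustrated (example values only): on a 64-bit
 * kernel, index 130 lives in word 130 >> 6 = 2 of space_id[], at bit
 * 130 & 63 = 2, so the line above sets bit 2 of space_id[2].  The
 * returned value is the index shifted left by SPACEID_SHIFT, which is
 * the format the space registers expect.
 */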

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}
		free_space_ids += ndirty;

		space_id_index = 0;
	}
}
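
/*
 * Why the XOR works (explanatory note): every bit set in dirty_array
 * marks a space id that is still allocated, i.e. also set in
 * space_id[].  XORing therefore clears exactly those bits, returning
 * the ids to the free pool in one pass without disturbing any other
 * allocation state.
 */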

#else /* CONFIG_SMP */
static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
#endif
}
#endif