/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
char  empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
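
/*
 * Tell the z/VM hypervisor that a page is no longer in use (CP diagnose
 * 0x10).  The diagnose has to be issued in 31-bit addressing mode, so
 * pages close to or above the 2 GB boundary are skipped.
 */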
void diag10(unsigned long addr)
{
        if (addr >= 0x7ff00000)
                return;
        asm volatile(
#ifdef CONFIG_64BIT
                " sam31\n"
                " diag %0,%0,0x10\n"
                "0: sam64\n"
#else
                " diag %0,%0,0x10\n"
                "0:\n"
#endif
                EX_TABLE(0b,0b)
                : : "a" (addr));
}
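
/*
 * Dump a short summary of memory usage: free areas, free swap, and the
 * number of reserved, shared and swap-cached pages.
 */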
void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map + i))
                        reserved++;
                else if (PageSwapCache(mem_map + i))
                        cached++;
                else if (page_count(mem_map + i))
                        shared += page_count(mem_map + i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

extern unsigned long __initdata zholes_size[];

/*
 * paging_init() sets up the kernel page tables: it clears swapper_pg_dir
 * and then builds a 1:1 (identity) mapping of all physical memory, with
 * the kernel's read-only data mapped read-only.
 */
#ifndef CONFIG_64BIT
void __init paging_init(void)
{
        pgd_t * pg_dir;
        pte_t * pg_table;
        pte_t   pte;
        int     i;
        unsigned long tmp;
        unsigned long pfn = 0;
        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
        static const int ssm_mask = 0x04000000L;
        unsigned long ro_start_pfn, ro_end_pfn;
        unsigned long zones_size[MAX_NR_ZONES];

        ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
        ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

        memset(zones_size, 0, sizeof(zones_size));
        zones_size[ZONE_DMA] = max_low_pfn;
        free_area_init_node(0, &contig_page_data, zones_size,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
                            zholes_size);

        /* unmap whole virtual address space */
        pg_dir = swapper_pg_dir;
        for (i = 0; i < PTRS_PER_PGD; i++)
                pmd_clear((pmd_t *) pg_dir++);

        /*
         * map whole physical memory to virtual memory (identity mapping)
         */
        pg_dir = swapper_pg_dir;
        while (pfn < max_low_pfn) {
                /*
                 * pg_table is physical at this point
                 */
                pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
                pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
                pg_dir++;

                for (tmp = 0; tmp < PTRS_PER_PTE; tmp++, pg_table++) {
                        if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
                                pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
                        else
                                pte = pfn_pte(pfn, PAGE_KERNEL);
                        if (pfn >= max_low_pfn)
                                pte_val(pte) = _PAGE_TYPE_EMPTY;
                        set_pte(pg_table, pte);
                        pfn++;
                }
        }

        S390_lowcore.kernel_asce = pgdir_k;

        /* enable virtual mapping in kernel mode */
        __ctl_load(pgdir_k, 1, 1);
        __ctl_load(pgdir_k, 7, 7);
        __ctl_load(pgdir_k, 13, 13);
        __raw_local_irq_ssm(ssm_mask);

        local_flush_tlb();
        return;
}

#else /* CONFIG_64BIT */
void __init paging_init(void)
{
        pgd_t * pg_dir;
        pmd_t * pm_dir;
        pte_t * pt_dir;
        pte_t   pte;
        int     i, j, k;
        unsigned long pfn = 0;
        unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                _KERN_REGION_TABLE;
        static const int ssm_mask = 0x04000000L;
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long dma_pfn, high_pfn;
        unsigned long ro_start_pfn, ro_end_pfn;

        memset(zones_size, 0, sizeof(zones_size));
        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
        high_pfn = max_low_pfn;
        ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
        ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

        if (dma_pfn > high_pfn)
                zones_size[ZONE_DMA] = high_pfn;
        else {
                zones_size[ZONE_DMA] = dma_pfn;
                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
        }

        /* Initialize mem_map[]. */
        free_area_init_node(0, &contig_page_data, zones_size,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);

        /*
         * map whole physical memory to virtual memory (identity mapping)
         */
        pg_dir = swapper_pg_dir;
        for (i = 0; i < PTRS_PER_PGD; i++, pg_dir++) {
                if (pfn >= max_low_pfn) {
                        pgd_clear(pg_dir);
                        continue;
                }
                pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
                pgd_populate(&init_mm, pg_dir, pm_dir);

                for (j = 0; j < PTRS_PER_PMD; j++, pm_dir++) {
                        if (pfn >= max_low_pfn) {
                                pmd_clear(pm_dir);
                                continue;
                        }
                        pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

                        for (k = 0; k < PTRS_PER_PTE; k++, pt_dir++) {
                                if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
                                        pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
                                else
                                        pte = pfn_pte(pfn, PAGE_KERNEL);
                                if (pfn >= max_low_pfn)
                                        pte_val(pte) = _PAGE_TYPE_EMPTY;
                                set_pte(pt_dir, pte);
                                pfn++;
                        }
                }
        }

        S390_lowcore.kernel_asce = pgdir_k;

        /* enable virtual mapping in kernel mode */
        __ctl_load(pgdir_k, 1, 1);
        __ctl_load(pgdir_k, 7, 7);
        __ctl_load(pgdir_k, 13, 13);
        __raw_local_irq_ssm(ssm_mask);

        local_flush_tlb();
        return;
}
#endif /* CONFIG_64BIT */
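
/*
 * Hand all bootmem pages over to the page allocator and print a summary
 * of how the available memory is used.
 */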
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;

        max_mapnr = num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
               max_mapnr << (PAGE_SHIFT - 10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT - 10),
               datasize >> 10,
               initsize >> 10);
        printk("Write protected kernel read-only data: %#lx - %#lx\n",
               (unsigned long)&__start_rodata,
               PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
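
/*
 * Return the pages holding __init code and data to the page allocator
 * once boot no longer needs them.
 */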
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif