/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
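
/*
 * Allocate pages for page table structures: from the slab allocator once
 * it is available, from the bootmem allocator during early boot.
 */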
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
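
/*
 * Allocate and clear a pud table. Only the 64 bit kernel has a pud level;
 * on 31 bit this simply returns NULL.
 */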
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}
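
/*
 * Allocate and clear a pmd (segment) table, analogous to vmem_pud_alloc().
 */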
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}
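
/*
 * Allocate a page table and mark all entries as empty. Uses
 * page_table_alloc() once the slab allocator is up, bootmem before that.
 */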
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
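		/*
		 * With EDAT2 (and without DEBUG_PAGEALLOC) map a whole
		 * pud-sized (2 GB) region with a single large entry when
		 * address and remaining size are suitably aligned.
		 */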
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_RO : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
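		/*
		 * With EDAT1 map a whole segment (1 MB) with a single large
		 * pmd entry when address and remaining size allow it.
		 */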
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				(ro ? _SEGMENT_ENTRY_RO : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would also have to allocate page
			 * tables, since vmemmap_populate gets called for each
			 * section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			/* new_page already holds the physical address of the
			 * freshly allocated page, no second __pa needed. */
			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = new_page;
		}
		address += PAGE_SIZE;
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
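
/*
 * Take the segment off the segment list and invalidate its pages in the
 * identity mapping.
 */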
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
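
/*
 * Remove a previously added mapping: look up the matching segment under
 * vmem_mutex, unmap it and free the segment descriptor.
 */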
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
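
/*
 * Register a new memory segment and add it to the identity mapping.
 * Undoes the registration again if creating the mapping fails.
 */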
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);