kexec.c

/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
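/*
 * A sketch of the descriptor encoding used throughout this file (the
 * IND_* flags come from <linux/kexec.h>): each kimage_entry_t is a
 * page-aligned physical address with a flag OR'd into the low bits:
 *
 *	dest | IND_DESTINATION	set where the following pages are copied
 *	src  | IND_SOURCE	a page holding new-kernel data
 *	next | IND_INDIRECTION	chain to the next descriptor page
 *	IND_DONE		end of the list
 *
 * The assembly stub walks this list with no help from the old kernel,
 * which is why the structure must be self-contained.
 */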
/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		/* copy_from_user() returns the number of bytes not
		 * copied; turn that into a proper error code.
		 */
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		/* Stash the order in page_private so that
		 * kimage_free_pages() can recover it.
		 */
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
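	/* A worked sketch of the round-up arithmetic below (the numbers
	 * are illustrative): with 4K pages and order 1, size = 0x2000,
	 * so a control_page of 0x123000 rounds up to hole_start =
	 * 0x124000 with hole_end = 0x125fff.  A hole that overlaps a
	 * segment is re-rounded past the segment's end the same way.
	 */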
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt(entry & PAGE_MASK) : ptr + 1)
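/*
 * As a sketch, the entry stream the helpers above build for a two-page
 * segment destined for address D looks like:
 *
 *	D | IND_DESTINATION, src0 | IND_SOURCE, src1 | IND_SOURCE, IND_DONE
 *
 * where src1 is implicitly destined for D + PAGE_SIZE, because each
 * IND_SOURCE entry advances image->destination by one page.
 */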
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
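	/* Concretely: if we allocate a page at physical address A while
	 * looking for destination B, and some other source page is
	 * already assigned to destination A, the code below copies that
	 * page's contents into our fresh page and swaps the two, so the
	 * page living at A is once again the one destined for A.
	 */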
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
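/*
 * A minimal userspace invocation, sketched for a single page-aligned
 * segment (kernel_buf, kernel_len and the 1M load address are
 * hypothetical values, not taken from this file):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,		// image data in user space
 *		.bufsz = kernel_len,
 *		.mem   = (void *)0x100000,	// page-aligned destination
 *		.memsz = (kernel_len + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * The destination checks in do_kimage_alloc() above decide whether
 * such a segment is accepted.
 */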
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				      unsigned long nr_segments,
				      struct compat_kexec_segment __user *segments,
				      unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
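/*
 * A sketch of the resulting in-memory layout; each (... + 3)/4 above
 * rounds a byte count up to whole 32-bit words:
 *
 *	| n_namesz | n_descsz | n_type | name, padded | desc, padded |
 *
 * final_note() then terminates the buffer with an all-zero header.
 */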
static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}

	return 0;
}
module_init(crash_notes_memory_init)
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
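/*
 * For example (illustrative values, not from this file),
 * crashkernel=512M-2G:64M,2G-:128M@16M reserves 64M when the system
 * has between 512M and 2G of RAM, and 128M when it has more than 2G,
 * in either case at physical offset 16M.
 */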
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
						"value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur != ' ' && *cur != '@')
			cur++;

		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
						"after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
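/*
 * For example (illustrative values), crashkernel=128M@16M reserves
 * 128M at physical offset 16M.
 */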
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
				crash_base);

	return 0;
}
void crash_save_vmcoreinfo(void)
{
	u32 *buf;

	if (!vmcoreinfo_size)
		return;

	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

	buf = (u32 *)vmcoreinfo_note;

	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);

	final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
	VMCOREINFO_SYMBOL(swapper_pg_dir);
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmlist);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vm_struct, addr);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}
module_init(crash_save_vmcoreinfo_init)
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		mutex_lock(&pm_mutex);
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = device_suspend(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		device_pm_lock();
		/* At this point, device_suspend() has been called,
		 * but *not* device_power_down().  We *must*
		 * device_power_down() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = device_power_down(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		/* Suspend system devices */
		error = sysdev_suspend(PMSG_FREEZE);
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kernel_restart_prepare(NULL);
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		sysdev_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		device_power_up(PMSG_RESTORE);
 Resume_devices:
		device_pm_unlock();
		device_resume(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		mutex_unlock(&pm_mutex);
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}