crash_dump.c

/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))

struct dump_save_areas dump_save_areas;

/*
 * Allocate and add a save area for a CPU
 */
struct save_area *dump_save_area_create(int cpu)
{
	struct save_area **save_areas, *save_area;

	save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
	if (!save_area)
		return NULL;
	if (cpu + 1 > dump_save_areas.count) {
		dump_save_areas.count = cpu + 1;
		save_areas = krealloc(dump_save_areas.areas,
				      dump_save_areas.count * sizeof(void *),
				      GFP_KERNEL | __GFP_ZERO);
		if (!save_areas) {
			kfree(save_area);
			return NULL;
		}
		dump_save_areas.areas = save_areas;
	}
	dump_save_areas.areas[cpu] = save_area;
	return save_area;
}
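
/*
 * Hypothetical caller pattern, sketched for illustration only (the real
 * callers live in the s390 SMP / machine-check code):
 *
 *	struct save_area *sa = dump_save_area_create(cpu);
 *
 *	if (!sa)
 *		return -ENOMEM;
 *	// ... fill sa->gp_regs, sa->psw, sa->pref_reg, etc. from the
 *	// hardware save area of that CPU
 */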

/*
 * Return physical address for virtual address
 */
static inline void *load_real_addr(void *addr)
{
	unsigned long real_addr;

	asm volatile(
		" lra %0,0(%1)\n"
		" jz 0f\n"
		" la %0,0\n"
		"0:"
		: "=a" (real_addr) : "a" (addr) : "cc");
	return (void *)real_addr;
}
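
/*
 * How the inline assembly above works: "lra" (load real address)
 * translates the virtual address in %1 and sets condition code 0 on
 * success. "jz 0f" then jumps over the "la %0,0", so the translated
 * address is returned; for an untranslatable address the result is
 * overwritten with 0, i.e. NULL.
 */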

/*
 * Copy real to virtual or real memory
 */
static int copy_from_realmem(void *dest, void *src, size_t count)
{
	unsigned long size;

	if (!count)
		return 0;
	if (!is_vmalloc_or_module_addr(dest))
		return memcpy_real(dest, src, count);
	do {
		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
		if (memcpy_real(load_real_addr(dest), src, size))
			return -EFAULT;
		count -= size;
		dest += size;
		src += size;
	} while (count);
	return 0;
}
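
/*
 * Worked example (the numbers are made up): copying 6 KB to a vmalloc'ed
 * destination that starts 1 KB before a 4 KB page boundary proceeds in
 * three chunks, because vmalloc memory is only virtually contiguous and
 * every destination page may live at a different real address:
 *
 *	chunk 1: 1 KB (up to the first page boundary)
 *	chunk 2: 4 KB (one full page)
 *	chunk 3: 1 KB (remainder)
 *
 * load_real_addr() is applied per chunk, so memcpy_real() always gets
 * the real address of the current destination page.
 */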

/*
 * Pointer to ELF header in new kernel
 */
static void *elfcorehdr_newmem;

/*
 * Copy one page from zfcpdump "oldmem"
 *
 * Pages below the HSA size are copied from the HSA; otherwise a real
 * memory copy is used.
 */
static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
					 unsigned long src, int userbuf)
{
	int rc;

	if (src < sclp_get_hsa_size()) {
		rc = memcpy_hsa(buf, src, csize, userbuf);
	} else {
		if (userbuf)
			rc = copy_to_user_real((void __force __user *) buf,
					       (void *) src, csize);
		else
			rc = memcpy_real(buf, (void *) src, csize);
	}
	return rc ? rc : csize;
}

/*
 * Copy one page from kdump "oldmem"
 *
 * For the kdump reserved memory this function performs a swap operation:
 * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
 * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
 */
static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
				      unsigned long src, int userbuf)
{
	int rc;

	if (src < OLDMEM_SIZE)
		src += OLDMEM_BASE;
	else if (src > OLDMEM_BASE && src < OLDMEM_BASE + OLDMEM_SIZE)
		src -= OLDMEM_BASE;
	if (userbuf)
		rc = copy_to_user_real((void __force __user *) buf,
				       (void *) src, csize);
	else
		rc = copy_from_realmem(buf, (void *) src, csize);
	return rc ? rc : csize;
}
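
/*
 * Worked example of the swap, with hypothetical values OLDMEM_BASE = 8 GB
 * and OLDMEM_SIZE = 128 MB:
 *
 *	src = 1 MB        -> reads from 8 GB + 1 MB
 *	src = 8 GB + 1 MB -> reads from 1 MB
 *	src = 16 GB       -> reads from 16 GB (outside both windows,
 *			     unchanged)
 */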

/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
			 unsigned long offset, int userbuf)
{
	unsigned long src;

	if (!csize)
		return 0;
	src = (pfn << PAGE_SHIFT) + offset;
	if (OLDMEM_BASE)
		return copy_oldmem_page_kdump(buf, csize, src, userbuf);
	else
		return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
}

/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
					unsigned long from, unsigned long pfn,
					unsigned long size, pgprot_t prot)
{
	unsigned long size_old;
	int rc;

	if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
		size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
		rc = remap_pfn_range(vma, from,
				     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
				     size_old, prot);
		if (rc || size == size_old)
			return rc;
		size -= size_old;
		from += size_old;
		pfn += size_old >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above HSA size. Memory below HSA size
 * is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
					   unsigned long from,
					   unsigned long pfn,
					   unsigned long size, pgprot_t prot)
{
	unsigned long hsa_end = sclp_get_hsa_size();
	unsigned long size_hsa;

	if (pfn < hsa_end >> PAGE_SHIFT) {
		size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
		if (size == size_hsa)
			return 0;
		size -= size_hsa;
		from += size_hsa;
		pfn += size_hsa >> PAGE_SHIFT;
	}
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for kdump or zfcpdump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (OLDMEM_BASE)
		return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
						       prot);
}

/*
 * Copy memory from old kernel
 */
int copy_from_oldmem(void *dest, void *src, size_t count)
{
	unsigned long copied = 0;
	int rc;

	if (OLDMEM_BASE) {
		if ((unsigned long) src < OLDMEM_SIZE) {
			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
			if (rc)
				return rc;
		}
	} else {
		unsigned long hsa_end = sclp_get_hsa_size();

		if ((unsigned long) src < hsa_end) {
			copied = min(count, hsa_end - (unsigned long) src);
			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
			if (rc)
				return rc;
		}
	}
	return copy_from_realmem(dest + copied, src + copied, count - copied);
}
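
/*
 * Worked example (hypothetical values): with OLDMEM_SIZE = 128 MB, a
 * copy_from_oldmem() of 1 MB starting at src = 128 MB - 4 KB is split in
 * two: the first 4 KB come from the swapped region at OLDMEM_BASE + src,
 * the remaining 1 MB - 4 KB are read directly from src + 4 KB. The
 * zfcpdump case is analogous, with the HSA boundary instead.
 */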

/*
 * Alloc memory and panic in case of ENOMEM
 */
static void *kzalloc_panic(int len)
{
	void *rc;

	rc = kzalloc(len, GFP_KERNEL);
	if (!rc)
		panic("s390 kdump kzalloc (%d) failed", len);
	return rc;
}

/*
 * Get memory layout and create hole for oldmem
 */
static struct mem_chunk *get_memory_layout(void)
{
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
	detect_memory_layout(chunk_array, 0);
	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
	return chunk_array;
}

/*
 * Initialize ELF note
 */
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
		     const char *name)
{
	Elf64_Nhdr *note;
	u64 len;

	note = (Elf64_Nhdr *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = d_len;
	note->n_type = type;
	len = sizeof(Elf64_Nhdr);

	memcpy(buf + len, name, note->n_namesz);
	len = roundup(len + note->n_namesz, 4);

	memcpy(buf + len, desc, note->n_descsz);
	len = roundup(len + note->n_descsz, 4);

	return PTR_ADD(buf, len);
}
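
/*
 * Resulting note layout, e.g. for name "CORE" (n_namesz = 5) and a
 * descriptor of d_len bytes:
 *
 *	offset  0: Elf64_Nhdr (12 bytes: n_namesz, n_descsz, n_type)
 *	offset 12: "CORE\0" plus 3 padding bytes (name rounded up to 8)
 *	offset 20: descriptor data, rounded up to a multiple of 4
 */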

/*
 * Initialize prstatus note
 */
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
	struct elf_prstatus nt_prstatus;
	static int cpu_nr = 1;

	memset(&nt_prstatus, 0, sizeof(nt_prstatus));
	memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
	memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
	memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
	nt_prstatus.pr_pid = cpu_nr;
	cpu_nr++;

	return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
		       "CORE");
}

/*
 * Initialize fpregset (floating point) note
 */
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
	elf_fpregset_t nt_fpregset;

	memset(&nt_fpregset, 0, sizeof(nt_fpregset));
	memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
	memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));

	return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
		       "CORE");
}

/*
 * Initialize timer note
 */
static void *nt_s390_timer(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
		       KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD clock comparator note
 */
static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
		       sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD programmable register note
 */
static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
		       sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize control register note
 */
static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
		       sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize prefix register note
 */
static void *nt_s390_prefix(void *ptr, struct save_area *sa)
{
	return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
		       sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Fill ELF notes for one CPU with save area registers
 */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
{
	ptr = nt_prstatus(ptr, sa);
	ptr = nt_fpregset(ptr, sa);
	ptr = nt_s390_timer(ptr, sa);
	ptr = nt_s390_tod_cmp(ptr, sa);
	ptr = nt_s390_tod_preg(ptr, sa);
	ptr = nt_s390_ctrs(ptr, sa);
	ptr = nt_s390_prefix(ptr, sa);
	return ptr;
}
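
/*
 * Each CPU thus contributes seven notes (prstatus, fpregset, timer, TOD
 * clock comparator, TOD programmable register, control and prefix
 * registers); elfcorehdr_alloc() below budgets 0x300 bytes per CPU for
 * them.
 */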

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
	struct elf_prpsinfo prpsinfo;

	memset(&prpsinfo, 0, sizeof(prpsinfo));
	prpsinfo.pr_sname = 'R';
	strcpy(prpsinfo.pr_fname, "vmlinux");
	return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
		       KEXEC_CORE_NOTE_NAME);
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
	char nt_name[11], *vmcoreinfo;
	Elf64_Nhdr note;
	void *addr;

	if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
		return NULL;
	memset(nt_name, 0, sizeof(nt_name));
	if (copy_from_oldmem(&note, addr, sizeof(note)))
		return NULL;
	if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
		return NULL;
	if (strcmp(nt_name, "VMCOREINFO") != 0)
		return NULL;
	vmcoreinfo = kzalloc_panic(note.n_descsz);
	if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
		return NULL;
	*size = note.n_descsz;
	return vmcoreinfo;
}
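
/*
 * The magic offset 24 above is the start of the note descriptor:
 * sizeof(Elf64_Nhdr) = 12, plus the name "VMCOREINFO\0" (11 bytes)
 * rounded up to the next multiple of 4 = 12, giving 12 + 12 = 24.
 */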

/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
	unsigned long size;
	void *vmcoreinfo;

	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
	if (!vmcoreinfo)
		vmcoreinfo = get_vmcoreinfo_old(&size);
	if (!vmcoreinfo)
		return ptr;
	return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_S390;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	ehdr->e_phnum = mem_chunk_cnt + 1;
	return ehdr + 1;
}
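
/*
 * e_phnum is the single PT_NOTE program header plus one PT_LOAD program
 * header per usable memory chunk; both are filled in later by
 * notes_init() and loads_init().
 */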

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
	int i, cpus = 0;

	for (i = 0; i < dump_save_areas.count; i++) {
		/* Skip CPUs without a filled save area (prefix register 0) */
		if (dump_save_areas.areas[i]->pref_reg == 0)
			continue;
		cpus++;
	}
	return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
	struct mem_chunk *chunk_array, *mem_chunk;
	int i, cnt = 0;

	chunk_array = get_memory_layout();
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		mem_chunk = &chunk_array[i];
		if (mem_chunk->type != CHUNK_READ_WRITE &&
		    mem_chunk->type != CHUNK_READ_ONLY)
			continue;
		if (mem_chunk->size == 0)
			continue;
		cnt++;
	}
	kfree(chunk_array);
	return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
	struct mem_chunk *chunk_array, *mem_chunk;
	int i;

	chunk_array = get_memory_layout();
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		mem_chunk = &chunk_array[i];
		if (mem_chunk->size == 0)
			continue;
		if (mem_chunk->type != CHUNK_READ_WRITE &&
		    mem_chunk->type != CHUNK_READ_ONLY)
			continue;
		phdr->p_type = PT_LOAD;
		phdr->p_offset = mem_chunk->addr;
		phdr->p_vaddr = mem_chunk->addr;
		phdr->p_paddr = mem_chunk->addr;
		phdr->p_filesz = mem_chunk->size;
		phdr->p_memsz = mem_chunk->size;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = PAGE_SIZE;
		phdr++;
	}
	kfree(chunk_array);
	return i;
}
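
/*
 * Note that p_offset is set to the chunk's physical address rather than
 * to a real offset inside the core file: the generic vmcore code in
 * fs/proc/vmcore.c takes p_offset of these arch-provided PT_LOAD headers
 * as the physical source address and rewrites it to an actual file
 * offset when composing /proc/vmcore.
 */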

/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
	struct save_area *sa;
	void *ptr_start = ptr;
	int i;

	ptr = nt_prpsinfo(ptr);
	for (i = 0; i < dump_save_areas.count; i++) {
		sa = dump_save_areas.areas[i];
		if (sa->pref_reg == 0)
			continue;
		ptr = fill_cpu_elf_notes(ptr, sa);
	}
	ptr = nt_vmcoreinfo(ptr);
	memset(phdr, 0, sizeof(*phdr));
	phdr->p_type = PT_NOTE;
	phdr->p_offset = notes_offset;
	phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
	phdr->p_memsz = phdr->p_filesz;
	return ptr;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	Elf64_Phdr *phdr_notes, *phdr_loads;
	int mem_chunk_cnt;
	void *ptr, *hdr;
	u32 alloc_size;
	u64 hdr_off;

	/* If we are not in kdump or zfcpdump mode return */
	if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	/* If elfcorehdr= has been passed via cmdline, we use that one */
	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
		return 0;
	/* If we cannot get HSA size for zfcpdump return error */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
		return -ENODEV;
	mem_chunk_cnt = get_mem_chunk_cnt();
	alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
		mem_chunk_cnt * sizeof(Elf64_Phdr);
	hdr = kzalloc_panic(alloc_size);
	/* Init elf header */
	ptr = ehdr_init(hdr, mem_chunk_cnt);
	/* Init program headers */
	phdr_notes = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
	phdr_loads = ptr;
	ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
	/* Init notes */
	hdr_off = PTR_DIFF(ptr, hdr);
	ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
	/* Init loads */
	hdr_off = PTR_DIFF(ptr, hdr);
	loads_init(phdr_loads, hdr_off);
	*addr = (unsigned long long) hdr;
	elfcorehdr_newmem = hdr;
	*size = (unsigned long long) hdr_off;
	BUG_ON(elfcorehdr_size > alloc_size);
	return 0;
}
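
/*
 * Layout of the buffer that elfcorehdr_alloc() builds (sizes on the
 * right are for orientation only):
 *
 *	Elf64_Ehdr                           64 bytes
 *	Elf64_Phdr PT_NOTE                   56 bytes
 *	Elf64_Phdr PT_LOAD * mem_chunk_cnt   56 bytes each
 *	notes (prpsinfo, per-CPU notes,
 *	       vmcoreinfo)                   variable
 *
 * hdr_off at the end is therefore the total header size, reported back
 * through *size.
 */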

/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
	if (!elfcorehdr_newmem)
		return;
	kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;

	src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
	memcpy(buf, src, count);
	*ppos += count;
	return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	void *src = (void *)(unsigned long)*ppos;
	int rc;

	if (elfcorehdr_newmem) {
		memcpy(buf, src, count);
	} else {
		rc = copy_from_oldmem(buf, src, count);
		if (rc)
			return rc;
	}
	*ppos += count;
	return count;
}