crash_dump.c

/*
 * S390 kdump implementation
 *
 * Copyright IBM Corp. 2011
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <asm/lowcore.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#include <asm/sclp.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
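
/*
 * Note that PTR_DIFF() casts its second argument to an integer, so it
 * computes the raw byte difference x - y without requiring y to be a
 * pointer of any particular type. It is used below to turn a cursor
 * pointer into an offset relative to the start of a buffer.
 */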

struct dump_save_areas dump_save_areas;

/*
 * Allocate and add a save area for a CPU
 */
struct save_area *dump_save_area_create(int cpu)
{
        struct save_area **save_areas, *save_area;

        save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
        if (!save_area)
                return NULL;
        if (cpu + 1 > dump_save_areas.count) {
                dump_save_areas.count = cpu + 1;
                save_areas = krealloc(dump_save_areas.areas,
                                      dump_save_areas.count * sizeof(void *),
                                      GFP_KERNEL | __GFP_ZERO);
                if (!save_areas) {
                        kfree(save_area);
                        return NULL;
                }
                dump_save_areas.areas = save_areas;
        }
        dump_save_areas.areas[cpu] = save_area;
        return save_area;
}
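
/*
 * dump_save_areas.areas grows on demand: dump_save_areas.count always holds
 * the highest registered CPU number plus one, and the pointer array is
 * reallocated whenever a CPU beyond the current count registers a save area.
 */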

/*
 * Return real address for virtual address
 */
static inline void *load_real_addr(void *addr)
{
        unsigned long real_addr;

        asm volatile(
                "       lra     %0,0(%1)\n"     /* translate addr via DAT */
                "       jz      0f\n"           /* cc 0: translation valid */
                "       la      %0,0\n"         /* otherwise return 0 */
                "0:"
                : "=a" (real_addr) : "a" (addr) : "cc");
        return (void *)real_addr;
}
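
/*
 * The LRA-based translation above resolves a single address, so contiguity
 * of the result is only guaranteed within one page. copy_from_realmem()
 * below therefore splits vmalloc/module destinations into page-sized
 * pieces before copying.
 */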

/*
 * Copy memory from real memory to a virtual or real destination
 */
static int copy_from_realmem(void *dest, void *src, size_t count)
{
        unsigned long size;

        if (!count)
                return 0;
        if (!is_vmalloc_or_module_addr(dest))
                return memcpy_real(dest, src, count);
        /* vmalloc/module memory is not physically contiguous: copy page-wise */
        do {
                size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
                if (memcpy_real(load_real_addr(dest), src, size))
                        return -EFAULT;
                count -= size;
                dest += size;
                src += size;
        } while (count);
        return 0;
}

/*
 * Pointer to ELF header in new kernel
 */
static void *elfcorehdr_newmem;

/*
 * Copy one page from zfcpdump "oldmem"
 *
 * For pages below ZFCPDUMP_HSA_SIZE, memory is copied from the HSA.
 * Otherwise a real memory copy is used.
 */
static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
                                         unsigned long src, int userbuf)
{
        int rc;

        if (src < ZFCPDUMP_HSA_SIZE) {
                rc = memcpy_hsa(buf, src, csize, userbuf);
        } else {
                if (userbuf)
                        rc = copy_to_user_real((void __force __user *) buf,
                                               (void *) src, csize);
                else
                        rc = memcpy_real(buf, (void *) src, csize);
        }
        return rc ? rc : csize;
}

/*
 * Copy one page from kdump "oldmem"
 *
 * For the kdump reserved memory this function performs a swap operation:
 *  - [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE) is mapped to [0, OLDMEM_SIZE).
 *  - [0, OLDMEM_SIZE) is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE).
 */
static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
                                      unsigned long src, int userbuf)
{
        int rc;

        if (src < OLDMEM_SIZE)
                src += OLDMEM_BASE;
        else if (src > OLDMEM_BASE &&
                 src < OLDMEM_BASE + OLDMEM_SIZE)
                src -= OLDMEM_BASE;
        if (userbuf)
                rc = copy_to_user_real((void __force __user *) buf,
                                       (void *) src, csize);
        else
                rc = copy_from_realmem(buf, (void *) src, csize);
        return rc ? rc : csize;
}
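
/*
 * Worked example of the swap above, assuming (hypothetically) that
 * OLDMEM_BASE = 0x20000000 and OLDMEM_SIZE = 0x10000000:
 *
 *   src = 0x00042000  ->  reads 0x20042000  (old kernel's low memory,
 *                                            preserved in the reserved area)
 *   src = 0x20042000  ->  reads 0x00042000  (the reserved area itself)
 *   src = 0x40000000  ->  reads 0x40000000  (everything else is 1:1)
 */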

/*
 * Copy one page from "oldmem"
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                         unsigned long offset, int userbuf)
{
        unsigned long src;

        if (!csize)
                return 0;
        src = (pfn << PAGE_SHIFT) + offset;
        if (OLDMEM_BASE)
                return copy_oldmem_page_kdump(buf, csize, src, userbuf);
        else
                return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
}
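
/*
 * OLDMEM_BASE is non-zero only when this kernel was booted as a kdump
 * kernel, so it serves as the kdump-vs-zfcpdump mode switch here and in
 * the remap and copy helpers below.
 */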

/*
 * Remap "oldmem" for kdump
 *
 * For the kdump reserved memory this function performs a swap operation:
 * [0, OLDMEM_SIZE) is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE).
 */
static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
                                        unsigned long from, unsigned long pfn,
                                        unsigned long size, pgprot_t prot)
{
        unsigned long size_old;
        int rc;

        if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
                size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
                rc = remap_pfn_range(vma, from,
                                     pfn + (OLDMEM_BASE >> PAGE_SHIFT),
                                     size_old, prot);
                if (rc || size == size_old)
                        return rc;
                size -= size_old;
                from += size_old;
                pfn += size_old >> PAGE_SHIFT;
        }
        return remap_pfn_range(vma, from, pfn, size, prot);
}
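
/*
 * The mapping is thus done in at most two pieces: pages below OLDMEM_SIZE
 * are shifted up into the reserved region that holds the old kernel's low
 * memory, and whatever remains is mapped 1:1.
 */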

/*
 * Remap "oldmem" for zfcpdump
 *
 * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
 * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
 */
static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
                                           unsigned long from,
                                           unsigned long pfn,
                                           unsigned long size, pgprot_t prot)
{
        unsigned long size_hsa;

        if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
                size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
                if (size == size_hsa)
                        return 0;
                size -= size_hsa;
                from += size_hsa;
                pfn += size_hsa >> PAGE_SHIFT;
        }
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Remap "oldmem" for kdump or zfcpdump
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
                           unsigned long pfn, unsigned long size, pgprot_t prot)
{
        if (OLDMEM_BASE)
                return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
                                                       prot);
}

/*
 * Copy memory from old kernel
 */
int copy_from_oldmem(void *dest, void *src, size_t count)
{
        unsigned long copied = 0;
        int rc;

        if (OLDMEM_BASE) {
                if ((unsigned long) src < OLDMEM_SIZE) {
                        copied = min(count, OLDMEM_SIZE - (unsigned long) src);
                        rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
                        if (rc)
                                return rc;
                }
        } else {
                if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
                        copied = min(count,
                                     ZFCPDUMP_HSA_SIZE - (unsigned long) src);
                        rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
                        if (rc)
                                return rc;
                }
        }
        return copy_from_realmem(dest + copied, src + copied, count - copied);
}
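
/*
 * In other words: the leading part of the requested range, if it falls
 * below OLDMEM_SIZE (kdump) or ZFCPDUMP_HSA_SIZE (zfcpdump), comes from
 * the relocated copy or the HSA, and the remainder is read 1:1 from
 * real memory.
 */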

/*
 * Alloc memory and panic in case of ENOMEM
 */
static void *kzalloc_panic(int len)
{
        void *rc;

        rc = kzalloc(len, GFP_KERNEL);
        if (!rc)
                panic("s390 kdump kzalloc (%d) failed", len);
        return rc;
}

/*
 * Get memory layout and create hole for oldmem
 */
static struct mem_chunk *get_memory_layout(void)
{
        struct mem_chunk *chunk_array;

        chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
        detect_memory_layout(chunk_array, 0);
        create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
        return chunk_array;
}

/*
 * Initialize ELF note
 */
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
                     const char *name)
{
        Elf64_Nhdr *note;
        u64 len;

        note = (Elf64_Nhdr *)buf;
        note->n_namesz = strlen(name) + 1;
        note->n_descsz = d_len;
        note->n_type = type;
        len = sizeof(Elf64_Nhdr);
        memcpy(buf + len, name, note->n_namesz);
        len = roundup(len + note->n_namesz, 4);
        memcpy(buf + len, desc, note->n_descsz);
        len = roundup(len + note->n_descsz, 4);
        return PTR_ADD(buf, len);
}
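
/*
 * Resulting note layout (sketch, with the 4-byte alignment used above):
 *
 *   +----------------------+
 *   | Elf64_Nhdr           |  n_namesz / n_descsz / n_type
 *   +----------------------+
 *   | name, NUL-terminated |  padded up to a multiple of 4
 *   +----------------------+
 *   | desc (d_len bytes)   |  padded up to a multiple of 4
 *   +----------------------+
 *
 * The returned pointer is where the next note can be written.
 */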

/*
 * Initialize prstatus note
 */
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
        struct elf_prstatus nt_prstatus;
        static int cpu_nr = 1;

        memset(&nt_prstatus, 0, sizeof(nt_prstatus));
        memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
        memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
        memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
        nt_prstatus.pr_pid = cpu_nr;
        cpu_nr++;
        return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
                       "CORE");
}

/*
 * Initialize fpregset (floating point) note
 */
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
        elf_fpregset_t nt_fpregset;

        memset(&nt_fpregset, 0, sizeof(nt_fpregset));
        memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
        memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
        return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
                       "CORE");
}

/*
 * Initialize timer note
 */
static void *nt_s390_timer(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
                       KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD clock comparator note
 */
static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
                       sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize TOD programmable register note
 */
static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
                       sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize control register note
 */
static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
                       sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
}

/*
 * Initialize prefix register note
 */
static void *nt_s390_prefix(void *ptr, struct save_area *sa)
{
        return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
                       sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}

/*
 * Fill ELF notes for one CPU with save area registers
 */
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
{
        ptr = nt_prstatus(ptr, sa);
        ptr = nt_fpregset(ptr, sa);
        ptr = nt_s390_timer(ptr, sa);
        ptr = nt_s390_tod_cmp(ptr, sa);
        ptr = nt_s390_tod_preg(ptr, sa);
        ptr = nt_s390_ctrs(ptr, sa);
        ptr = nt_s390_prefix(ptr, sa);
        return ptr;
}
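
/*
 * Per CPU this emits seven notes: prstatus, fpregset and the five
 * s390-specific register notes. elfcorehdr_alloc() below reserves 0x300
 * bytes per CPU for this block of notes.
 */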

/*
 * Initialize prpsinfo note (new kernel)
 */
static void *nt_prpsinfo(void *ptr)
{
        struct elf_prpsinfo prpsinfo;

        memset(&prpsinfo, 0, sizeof(prpsinfo));
        prpsinfo.pr_sname = 'R';
        strcpy(prpsinfo.pr_fname, "vmlinux");
        return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
                       KEXEC_CORE_NOTE_NAME);
}

/*
 * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
 */
static void *get_vmcoreinfo_old(unsigned long *size)
{
        char nt_name[11], *vmcoreinfo;
        Elf64_Nhdr note;
        void *addr;

        if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
                return NULL;
        memset(nt_name, 0, sizeof(nt_name));
        if (copy_from_oldmem(&note, addr, sizeof(note)))
                return NULL;
        if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
                return NULL;
        if (strcmp(nt_name, "VMCOREINFO") != 0)
                return NULL;
        vmcoreinfo = kzalloc_panic(note.n_descsz);
        /*
         * desc starts after the note header and the padded name:
         * "VMCOREINFO" has n_namesz 11, padded to 12, so 12 + 12 = 24
         * bytes into the note.
         */
        if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) {
                kfree(vmcoreinfo);
                return NULL;
        }
        *size = note.n_descsz;
        return vmcoreinfo;
}

/*
 * Initialize vmcoreinfo note (new kernel)
 */
static void *nt_vmcoreinfo(void *ptr)
{
        unsigned long size;
        void *vmcoreinfo;

        vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
        if (!vmcoreinfo)
                vmcoreinfo = get_vmcoreinfo_old(&size);
        if (!vmcoreinfo)
                return ptr;
        return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}

/*
 * Initialize ELF header (new kernel)
 */
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
        memset(ehdr, 0, sizeof(*ehdr));
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
        ehdr->e_ident[EI_CLASS] = ELFCLASS64;
        ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
        ehdr->e_ident[EI_VERSION] = EV_CURRENT;
        memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
        ehdr->e_type = ET_CORE;
        ehdr->e_machine = EM_S390;
        ehdr->e_version = EV_CURRENT;
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);
        ehdr->e_phnum = mem_chunk_cnt + 1;
        return ehdr + 1;
}
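
/*
 * e_phnum is one PT_LOAD header per usable memory chunk plus one PT_NOTE
 * header; ELFDATA2MSB matches the big-endian s390 byte order.
 */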

/*
 * Return CPU count for ELF header (new kernel)
 */
static int get_cpu_cnt(void)
{
        int i, cpus = 0;

        for (i = 0; i < dump_save_areas.count; i++) {
                /* A prefix register of zero marks an unused save area */
                if (dump_save_areas.areas[i]->pref_reg == 0)
                        continue;
                cpus++;
        }
        return cpus;
}

/*
 * Return memory chunk count for ELF header (new kernel)
 */
static int get_mem_chunk_cnt(void)
{
        struct mem_chunk *chunk_array, *mem_chunk;
        int i, cnt = 0;

        chunk_array = get_memory_layout();
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                mem_chunk = &chunk_array[i];
                if (mem_chunk->type != CHUNK_READ_WRITE &&
                    mem_chunk->type != CHUNK_READ_ONLY)
                        continue;
                if (mem_chunk->size == 0)
                        continue;
                cnt++;
        }
        kfree(chunk_array);
        return cnt;
}

/*
 * Initialize ELF loads (new kernel)
 */
static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
        struct mem_chunk *chunk_array, *mem_chunk;
        int i;

        chunk_array = get_memory_layout();
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                mem_chunk = &chunk_array[i];
                if (mem_chunk->size == 0)
                        continue;
                if (mem_chunk->type != CHUNK_READ_WRITE &&
                    mem_chunk->type != CHUNK_READ_ONLY)
                        continue;
                phdr->p_type = PT_LOAD;
                phdr->p_offset = mem_chunk->addr;
                phdr->p_vaddr = mem_chunk->addr;
                phdr->p_paddr = mem_chunk->addr;
                phdr->p_filesz = mem_chunk->size;
                phdr->p_memsz = mem_chunk->size;
                phdr->p_flags = PF_R | PF_W | PF_X;
                phdr->p_align = PAGE_SIZE;
                phdr++;
        }
        kfree(chunk_array);
        return i;
}
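
/*
 * Each PT_LOAD header describes one usable memory chunk. p_offset is set
 * to the chunk's physical address here; /proc/vmcore treats the PT_LOAD
 * p_offset of a kernel-provided core header as the old kernel's physical
 * address and later rewrites it to a real file offset.
 */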

/*
 * Initialize notes (new kernel)
 */
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
        struct save_area *sa;
        void *ptr_start = ptr;
        int i;

        ptr = nt_prpsinfo(ptr);
        for (i = 0; i < dump_save_areas.count; i++) {
                sa = dump_save_areas.areas[i];
                if (sa->pref_reg == 0)
                        continue;
                ptr = fill_cpu_elf_notes(ptr, sa);
        }
        ptr = nt_vmcoreinfo(ptr);
        memset(phdr, 0, sizeof(*phdr));
        phdr->p_type = PT_NOTE;
        phdr->p_offset = notes_offset;
        phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
        phdr->p_memsz = phdr->p_filesz;
        return ptr;
}

/*
 * Create ELF core header (new kernel)
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        Elf64_Phdr *phdr_notes, *phdr_loads;
        int mem_chunk_cnt;
        void *ptr, *hdr;
        u32 alloc_size;
        u64 hdr_off;

        /* If we are not in kdump or zfcpdump mode return */
        if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
                return 0;
        /* If elfcorehdr= has been passed via cmdline, we use that one */
        if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
                return 0;
        mem_chunk_cnt = get_mem_chunk_cnt();

        alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
                     mem_chunk_cnt * sizeof(Elf64_Phdr);
        hdr = kzalloc_panic(alloc_size);
        /* Init elf header */
        ptr = ehdr_init(hdr, mem_chunk_cnt);
        /* Init program headers */
        phdr_notes = ptr;
        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
        phdr_loads = ptr;
        ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
        /* Init notes */
        hdr_off = PTR_DIFF(ptr, hdr);
        ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
        /* Init loads */
        hdr_off = PTR_DIFF(ptr, hdr);
        loads_init(phdr_loads, hdr_off);
        *addr = (unsigned long long) hdr;
        elfcorehdr_newmem = hdr;
        *size = (unsigned long long) hdr_off;
        BUG_ON(elfcorehdr_size > alloc_size);
        return 0;
}
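
/*
 * Layout of the buffer built above (offsets relative to hdr):
 *
 *   Elf64_Ehdr | PT_NOTE Phdr | mem_chunk_cnt x PT_LOAD Phdr | note data
 *
 * The reported *size only covers up to the end of the notes; the PT_LOAD
 * data itself stays in old memory and is never copied into this buffer.
 */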

/*
 * Free ELF core header (new kernel)
 */
void elfcorehdr_free(unsigned long long addr)
{
        if (!elfcorehdr_newmem)
                return;
        kfree((void *)(unsigned long)addr);
}

/*
 * Read from ELF header
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        void *src = (void *)(unsigned long)*ppos;

        src = elfcorehdr_newmem ? src : src - OLDMEM_BASE;
        memcpy(buf, src, count);
        *ppos += count;
        return count;
}

/*
 * Read from ELF notes data
 */
ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        void *src = (void *)(unsigned long)*ppos;
        int rc;

        if (elfcorehdr_newmem) {
                memcpy(buf, src, count);
        } else {
                rc = copy_from_oldmem(buf, src, count);
                if (rc)
                        return rc;
        }
        *ppos += count;
        return count;
}