|
@@ -16,6 +16,7 @@
|
|
|
#include <asm/os_info.h>
|
|
|
#include <asm/elf.h>
|
|
|
#include <asm/ipl.h>
|
|
|
+#include <asm/sclp.h>
|
|
|
|
|
|
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
|
|
|
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
|
|
@@ -69,22 +70,41 @@ static ssize_t copy_page_real(void *buf, void *src, size_t csize)
|
|
|
static void *elfcorehdr_newmem;
|
|
|
|
|
|
/*
|
|
|
- * Copy one page from "oldmem"
|
|
|
+ * Copy one page from zfcpdump "oldmem"
|
|
|
+ *
|
|
|
+ * For pages below ZFCPDUMP_HSA_SIZE memory from the HSA is copied. Otherwise
|
|
|
+ * real memory copy is used.
|
|
|
+ */
|
|
|
+static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
|
|
|
+ unsigned long src, int userbuf)
|
|
|
+{
|
|
|
+ int rc;
|
|
|
+
|
|
|
+ if (src < ZFCPDUMP_HSA_SIZE) {
|
|
|
+ rc = memcpy_hsa(buf, src, csize, userbuf);
|
|
|
+ } else {
|
|
|
+ if (userbuf)
|
|
|
+ rc = copy_to_user_real((void __force __user *) buf,
|
|
|
+ (void *) src, csize);
|
|
|
+ else
|
|
|
+ rc = memcpy_real(buf, (void *) src, csize);
|
|
|
+ }
|
|
|
+ return rc ? rc : csize;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Copy one page from kdump "oldmem"
|
|
|
*
|
|
|
* For the kdump reserved memory this functions performs a swap operation:
|
|
|
* - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE].
|
|
|
* - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
|
|
|
*/
|
|
|
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
|
|
|
- size_t csize, unsigned long offset, int userbuf)
|
|
|
+static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
|
|
|
+ unsigned long src, int userbuf)
|
|
|
+
|
|
|
{
|
|
|
- unsigned long src;
|
|
|
int rc;
|
|
|
|
|
|
- if (!csize)
|
|
|
- return 0;
|
|
|
-
|
|
|
- src = (pfn << PAGE_SHIFT) + offset;
|
|
|
if (src < OLDMEM_SIZE)
|
|
|
src += OLDMEM_BASE;
|
|
|
else if (src > OLDMEM_BASE &&
|
|
@@ -95,17 +115,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
|
|
|
(void *) src, csize);
|
|
|
else
|
|
|
rc = copy_page_real(buf, (void *) src, csize);
|
|
|
- return (rc == 0) ? csize : rc;
|
|
|
+ return (rc == 0) ? csize : rc;
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Remap "oldmem"
|
|
|
+ * Copy one page from "oldmem"
|
|
|
+ */
|
|
|
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
|
|
|
+ unsigned long offset, int userbuf)
|
|
|
+{
|
|
|
+ unsigned long src;
|
|
|
+
|
|
|
+ if (!csize)
|
|
|
+ return 0;
|
|
|
+ src = (pfn << PAGE_SHIFT) + offset;
|
|
|
+ if (OLDMEM_BASE)
|
|
|
+ return copy_oldmem_page_kdump(buf, csize, src, userbuf);
|
|
|
+ else
|
|
|
+ return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Remap "oldmem" for kdump
|
|
|
*
|
|
|
* For the kdump reserved memory this functions performs a swap operation:
|
|
|
* [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE]
|
|
|
*/
|
|
|
-int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
|
|
|
- unsigned long pfn, unsigned long size, pgprot_t prot)
|
|
|
+static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
|
|
|
+ unsigned long from, unsigned long pfn,
|
|
|
+ unsigned long size, pgprot_t prot)
|
|
|
{
|
|
|
unsigned long size_old;
|
|
|
int rc;
|
|
@@ -124,6 +162,43 @@ int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
|
|
|
return remap_pfn_range(vma, from, pfn, size, prot);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Remap "oldmem" for zfcpdump
|
|
|
+ *
|
|
|
+ * We only map available memory above ZFCPDUMP_HSA_SIZE. Memory below
|
|
|
+ * ZFCPDUMP_HSA_SIZE is read on demand using the copy_oldmem_page() function.
|
|
|
+ */
|
|
|
+static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
|
|
|
+ unsigned long from,
|
|
|
+ unsigned long pfn,
|
|
|
+ unsigned long size, pgprot_t prot)
|
|
|
+{
|
|
|
+ unsigned long size_hsa;
|
|
|
+
|
|
|
+ if (pfn < ZFCPDUMP_HSA_SIZE >> PAGE_SHIFT) {
|
|
|
+ size_hsa = min(size, ZFCPDUMP_HSA_SIZE - (pfn << PAGE_SHIFT));
|
|
|
+ if (size == size_hsa)
|
|
|
+ return 0;
|
|
|
+ size -= size_hsa;
|
|
|
+ from += size_hsa;
|
|
|
+ pfn += size_hsa >> PAGE_SHIFT;
|
|
|
+ }
|
|
|
+ return remap_pfn_range(vma, from, pfn, size, prot);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Remap "oldmem" for kdump or zfcpdump
|
|
|
+ */
|
|
|
+int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
|
|
|
+ unsigned long pfn, unsigned long size, pgprot_t prot)
|
|
|
+{
|
|
|
+ if (OLDMEM_BASE)
|
|
|
+ return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
|
|
|
+ else
|
|
|
+ return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
|
|
|
+ prot);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Copy memory from old kernel
|
|
|
*/
|
|
@@ -132,11 +207,21 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
|
|
|
unsigned long copied = 0;
|
|
|
int rc;
|
|
|
|
|
|
- if ((unsigned long) src < OLDMEM_SIZE) {
|
|
|
- copied = min(count, OLDMEM_SIZE - (unsigned long) src);
|
|
|
- rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
|
|
|
- if (rc)
|
|
|
- return rc;
|
|
|
+ if (OLDMEM_BASE) {
|
|
|
+ if ((unsigned long) src < OLDMEM_SIZE) {
|
|
|
+ copied = min(count, OLDMEM_SIZE - (unsigned long) src);
|
|
|
+ rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
|
|
|
+ if (rc)
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if ((unsigned long) src < ZFCPDUMP_HSA_SIZE) {
|
|
|
+ copied = min(count,
|
|
|
+ ZFCPDUMP_HSA_SIZE - (unsigned long) src);
|
|
|
+ rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
|
|
|
+ if (rc)
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
}
|
|
|
return memcpy_real(dest + copied, src + copied, count - copied);
|
|
|
}
|
|
@@ -466,7 +551,8 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
|
|
|
u32 alloc_size;
|
|
|
u64 hdr_off;
|
|
|
|
|
|
- if (!OLDMEM_BASE)
|
|
|
+ /* If we are not in kdump or zfcpdump mode return */
|
|
|
+ if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP)
|
|
|
return 0;
|
|
|
/* If elfcorehdr= has been passed via cmdline, we use that one */
|
|
|
if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
|