@@ -25,7 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-
+#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
@@ -1643,10 +1643,120 @@ void *vmalloc_32_user(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
+/*
+ * Small helper routine: copy contents from addr into buf.
+ * If a page is not present, the corresponding bytes are zero-filled.
+ */
+
+static int aligned_vread(char *buf, char *addr, unsigned long count)
+{
+        struct page *p;
+        int copied = 0;
+
+        while (count) {
+                unsigned long offset, length;
+
+                offset = (unsigned long)addr & ~PAGE_MASK;
+                length = PAGE_SIZE - offset;
+                if (length > count)
+                        length = count;
+                p = vmalloc_to_page(addr);
+                /*
+                 * To do safe access to this _mapped_ area, we need a
+                 * lock. But taking a lock here would add vmalloc()/vfree()
+                 * overhead for this rarely used _debug_ interface. Instead,
+                 * we use kmap() and accept a small overhead in this access
+                 * function.
+                 */
+                if (p) {
+                        /*
+                         * We can expect USER0 is not used (see vread's and
+                         * vwrite's function descriptions).
+                         */
+                        void *map = kmap_atomic(p, KM_USER0);
+                        memcpy(buf, map + offset, length);
+                        kunmap_atomic(map, KM_USER0);
+                } else
+                        memset(buf, 0, length);
+
+                addr += length;
+                buf += length;
+                copied += length;
+                count -= length;
+        }
+        return copied;
+}
+
+static int aligned_vwrite(char *buf, char *addr, unsigned long count)
+{
+        struct page *p;
+        int copied = 0;
+
+        while (count) {
+                unsigned long offset, length;
+
+                offset = (unsigned long)addr & ~PAGE_MASK;
+                length = PAGE_SIZE - offset;
+                if (length > count)
+                        length = count;
+                p = vmalloc_to_page(addr);
+                /*
+                 * To do safe access to this _mapped_ area, we need a
+                 * lock. But taking a lock here would add vmalloc()/vfree()
+                 * overhead for this rarely used _debug_ interface. Instead,
+                 * we use kmap() and accept a small overhead in this access
+                 * function.
+                 */
+                if (p) {
+                        /*
+                         * We can expect USER0 is not used (see vread's and
+                         * vwrite's function descriptions).
+                         */
+                        void *map = kmap_atomic(p, KM_USER0);
+                        memcpy(map + offset, buf, length);
+                        kunmap_atomic(map, KM_USER0);
+                }
+                addr += length;
+                buf += length;
+                copied += length;
+                count -= length;
+        }
+        return copied;
+}
+
+/**
+ * vread() - read vmalloc area in a safe way.
+ * @buf: buffer for reading data
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ *
+ * Returns the number of bytes by which @addr and @buf should be
+ * increased (the same number as @count). Returns 0 if
+ * [addr...addr+count) does not intersect any live vmalloc area.
+ *
+ * This function checks that @addr is a valid vmalloc'ed area and
+ * copies data from that area to the given buffer. If the given memory
+ * range of [addr...addr+count) includes some valid address, data is
+ * copied to the proper part of @buf. Memory holes are zero-filled.
+ * An IOREMAP area is treated as a memory hole and no copy is done.
+ *
+ * @buf should be a kernel buffer. Because this function uses KM_USER0,
+ * the caller must guarantee that KM_USER0 is not used.
+ *
+ * Note: In usual operation, vread() is never necessary because the
+ * caller should know that the vmalloc() area is valid and can use
+ * memcpy(). This is for routines which have to access the vmalloc
+ * area without any information, such as /dev/kmem.
+ */
+
 long vread(char *buf, char *addr, unsigned long count)
 {
         struct vm_struct *tmp;
         char *vaddr, *buf_start = buf;
+        unsigned long buflen = count;
         unsigned long n;
 
         /* Don't allow overflow */
@@ -1654,7 +1764,7 @@ long vread(char *buf, char *addr, unsigned long count)
                 count = -(unsigned long) addr;
 
         read_lock(&vmlist_lock);
-        for (tmp = vmlist; tmp; tmp = tmp->next) {
+        for (tmp = vmlist; count && tmp; tmp = tmp->next) {
                 vaddr = (char *) tmp->addr;
                 if (addr >= vaddr + tmp->size - PAGE_SIZE)
                         continue;
@@ -1667,32 +1777,72 @@ long vread(char *buf, char *addr, unsigned long count)
                         count--;
                 }
                 n = vaddr + tmp->size - PAGE_SIZE - addr;
-                do {
-                        if (count == 0)
-                                goto finished;
-                        *buf = *addr;
-                        buf++;
-                        addr++;
-                        count--;
-                } while (--n > 0);
+                if (n > count)
+                        n = count;
+                if (!(tmp->flags & VM_IOREMAP))
+                        aligned_vread(buf, addr, n);
+                else /* IOREMAP area is treated as memory hole */
+                        memset(buf, 0, n);
+                buf += n;
+                addr += n;
+                count -= n;
         }
 finished:
         read_unlock(&vmlist_lock);
-        return buf - buf_start;
+
+        if (buf == buf_start)
+                return 0;
+        /* zero-fill memory holes */
+        if (buf != buf_start + buflen)
+                memset(buf, 0, buflen - (buf - buf_start));
+
+        return buflen;
 }
 
+/**
+ * vwrite() - write vmalloc area in a safe way.
+ * @buf: buffer for source data
+ * @addr: vm address.
+ * @count: number of bytes to be written.
+ *
+ * Returns the number of bytes by which @addr and @buf should be
+ * increased (the same number as @count). Returns 0 if
+ * [addr...addr+count) does not intersect any live vmalloc area.
+ *
+ * This function checks that @addr is a valid vmalloc'ed area and
+ * copies data from a buffer to that area. If the specified range of
+ * [addr...addr+count) includes some valid address, data is copied
+ * from the proper part of @buf. Nothing is copied into memory holes.
+ * An IOREMAP area is treated as a memory hole and no copy is done.
+ *
+ * @buf should be a kernel buffer. Because this function uses KM_USER0,
+ * the caller must guarantee that KM_USER0 is not used.
+ *
+ * Note: In usual operation, vwrite() is never necessary because the
+ * caller should know that the vmalloc() area is valid and can use
+ * memcpy(). This is for routines which have to access the vmalloc
+ * area without any information, such as /dev/kmem.
+ *
+ * The caller should also guarantee that KM_USER1 is not used.
+ */
+
 long vwrite(char *buf, char *addr, unsigned long count)
 {
         struct vm_struct *tmp;
-        char *vaddr, *buf_start = buf;
-        unsigned long n;
+        char *vaddr;
+        unsigned long n, buflen;
+        int copied = 0;
 
         /* Don't allow overflow */
         if ((unsigned long) addr + count < count)
                 count = -(unsigned long) addr;
+        buflen = count;
 
         read_lock(&vmlist_lock);
-        for (tmp = vmlist; tmp; tmp = tmp->next) {
+        for (tmp = vmlist; count && tmp; tmp = tmp->next) {
                 vaddr = (char *) tmp->addr;
                 if (addr >= vaddr + tmp->size - PAGE_SIZE)
                         continue;
@@ -1704,18 +1854,21 @@ long vwrite(char *buf, char *addr, unsigned long count)
                         count--;
                 }
                 n = vaddr + tmp->size - PAGE_SIZE - addr;
-                do {
-                        if (count == 0)
-                                goto finished;
-                        *addr = *buf;
-                        buf++;
-                        addr++;
-                        count--;
-                } while (--n > 0);
+                if (n > count)
+                        n = count;
+                if (!(tmp->flags & VM_IOREMAP)) {
+                        aligned_vwrite(buf, addr, n);
+                        copied++;
+                }
+                buf += n;
+                addr += n;
+                count -= n;
         }
 finished:
         read_unlock(&vmlist_lock);
-        return buf - buf_start;
+        if (!copied)
+                return 0;
+        return buflen;
 }
 
 /**
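
For reference, here is a minimal sketch of how a /dev/kmem-style reader could
rely on the contract this patch gives vread() (and, symmetrically, vwrite()):
the return value is either 0, when no live vmalloc area is intersected, or the
full request length, with memory holes zero-filled. This is illustrative only;
kmem_style_read() and its assumption of a small, bounded length are
hypothetical and are not part of this patch or of drivers/char/mem.c.

/*
 * Hypothetical caller -- not part of the patch. Assumes the usual
 * <linux/slab.h>, <linux/vmalloc.h> and <linux/uaccess.h> context.
 */
static ssize_t kmem_style_read(char __user *ubuf, char *kaddr,
                               unsigned long len)
{
        char *kbuf;
        long got;
        ssize_t ret;

        kbuf = kmalloc(len, GFP_KERNEL);   /* assumes a small, bounded len */
        if (!kbuf)
                return -ENOMEM;

        got = vread(kbuf, kaddr, len);     /* 0, or len with holes zeroed */
        if (got && copy_to_user(ubuf, kbuf, got))
                ret = -EFAULT;
        else
                ret = got;

        kfree(kbuf);
        return ret;
}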