@@ -32,15 +32,39 @@
 
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <xen/xen.h>
+
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+	struct list_head next;
+	union {
+		struct vm_struct *area; /* PV */
+		struct page *page;      /* HVM */
+	};
+	grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+	int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+	int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
 
 const char *xenbus_strstate(enum xenbus_state state)
 {
@@ -435,20 +459,34 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+	return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+				     int gnt_ref, void **vaddr)
 {
 	struct gnttab_map_grant_ref op = {
 		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
 		.ref = gnt_ref,
 		.dom = dev->otherend_id,
 	};
+	struct xenbus_map_node *node;
 	struct vm_struct *area;
 	pte_t *pte;
 
 	*vaddr = NULL;
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
 	area = alloc_vm_area(PAGE_SIZE, &pte);
-	if (!area)
+	if (!area) {
+		kfree(node);
 		return -ENOMEM;
+	}
 
 	op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 
 	if (op.status != GNTST_okay) {
 		free_vm_area(area);
+		kfree(node);
 		xenbus_dev_fatal(dev, op.status,
 				 "mapping in shared page %d from domain %d",
 				 gnt_ref, dev->otherend_id);
 		return op.status;
 	}
 
-	/* Stuff the handle in an unused field */
-	area->phys_addr = (unsigned long)op.handle;
+	node->handle = op.handle;
+	node->area = area;
+
+	spin_lock(&xenbus_valloc_lock);
+	list_add(&node->next, &xenbus_valloc_pages);
+	spin_unlock(&xenbus_valloc_lock);
 
 	*vaddr = area->addr;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+				      int gnt_ref, void **vaddr)
+{
+	struct xenbus_map_node *node;
+	int err;
+	void *addr;
+
+	*vaddr = NULL;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+	if (err)
+		goto out_err;
+
+	addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+	err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+	if (err)
+		goto out_err;
+
+	spin_lock(&xenbus_valloc_lock);
+	list_add(&node->next, &xenbus_valloc_pages);
+	spin_unlock(&xenbus_valloc_lock);
+
+	*vaddr = addr;
+	return 0;
+
+ out_err:
+	free_xenballooned_pages(1, &node->page);
+	kfree(node);
+	return err;
+}
 
 
 /**
@@ -525,32 +603,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 {
-	struct vm_struct *area;
+	return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+	struct xenbus_map_node *node;
 	struct gnttab_unmap_grant_ref op = {
 		.host_addr = (unsigned long)vaddr,
 	};
 	unsigned int level;
 
-	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
-	 * method so that we don't have to muck with vmalloc internals here.
-	 * We could force the user to hang on to their struct vm_struct from
-	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
-	 * this API.
-	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area != NULL; area = area->next) {
-		if (area->addr == vaddr)
-			break;
+	spin_lock(&xenbus_valloc_lock);
+	list_for_each_entry(node, &xenbus_valloc_pages, next) {
+		if (node->area->addr == vaddr) {
+			list_del(&node->next);
+			goto found;
+		}
 	}
-	read_unlock(&vmlist_lock);
+	node = NULL;
+ found:
+	spin_unlock(&xenbus_valloc_lock);
 
-	if (!area) {
+	if (!node) {
 		xenbus_dev_error(dev, -ENOENT,
 				 "can't find mapped virtual address %p", vaddr);
 		return GNTST_bad_virt_addr;
 	}
 
-	op.handle = (grant_handle_t)area->phys_addr;
+	op.handle = node->handle;
 	op.host_addr = arbitrary_virt_to_machine(
 		lookup_address((unsigned long)vaddr, &level)).maddr;
 
@@ -558,16 +640,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 		BUG();
 
 	if (op.status == GNTST_okay)
-		free_vm_area(area);
+		free_vm_area(node->area);
 	else
 		xenbus_dev_error(dev, op.status,
 				 "unmapping page at handle %d error %d",
-				 (int16_t)area->phys_addr, op.status);
+				 node->handle, op.status);
 
+	kfree(node);
 	return op.status;
 }
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+	int rv;
+	struct xenbus_map_node *node;
+	void *addr;
+
+	spin_lock(&xenbus_valloc_lock);
+	list_for_each_entry(node, &xenbus_valloc_pages, next) {
+		addr = pfn_to_kaddr(page_to_pfn(node->page));
+		if (addr == vaddr) {
+			list_del(&node->next);
+			goto found;
+		}
+	}
+	node = NULL;
+ found:
+	spin_unlock(&xenbus_valloc_lock);
+
+	if (!node) {
+		xenbus_dev_error(dev, -ENOENT,
+				 "can't find mapped virtual address %p", vaddr);
+		return GNTST_bad_virt_addr;
+	}
+
+	rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+	if (!rv)
+		free_xenballooned_pages(1, &node->page);
+	else
+		WARN(1, "Leaking %p\n", vaddr);
+
+	kfree(node);
+	return rv;
+}
 
 /**
  * xenbus_unmap_ring
@@ -617,3 +733,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 	return result;
 }
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+	.map = xenbus_map_ring_valloc_pv,
+	.unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+	.map = xenbus_map_ring_valloc_hvm,
+	.unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+	if (xen_pv_domain())
+		ring_ops = &ring_ops_pv;
+	else
+		ring_ops = &ring_ops_hvm;
+}
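
For illustration, here is a minimal sketch of how a driver consumes this interface. The function demo_connect_ring and its surroundings are hypothetical and not part of the patch; the point is that the same xenbus_map_ring_valloc()/xenbus_unmap_ring_vfree() pair now works unchanged in both PV and HVM guests, with the mode-specific work hidden behind ring_ops (selected once by xenbus_ring_ops_init(), which the xenbus core is expected to call during startup, hence the new "xenbus_probe.h" include).

#include <xen/xenbus.h>

/* Hypothetical caller, for illustration only: map the single shared
 * page the other end granted us (gnt_ref would normally be read from
 * XenStore), use it, then tear the mapping down again. */
static int demo_connect_ring(struct xenbus_device *dev, int gnt_ref)
{
	void *ring;	/* kernel-virtual address of the shared page */
	int err;

	/* PV: the grant is mapped into an alloc_vm_area() region;
	 * HVM: it is mapped over a ballooned-out page. */
	err = xenbus_map_ring_valloc(dev, gnt_ref, &ring);
	if (err)
		return err;	/* xenbus_dev_fatal() was already called */

	/* ... communicate over the ring ... */

	return xenbus_unmap_ring_vfree(dev, ring);
}

Because both unmap paths look the mapping up by the returned virtual address in xenbus_valloc_pages, callers never see struct xenbus_map_node, and the existing API is preserved exactly.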