
Merge branch 'stable/for-linus-fixes-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/for-linus-fixes-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen-gntalloc: signedness bug in add_grefs()
  xen-gntalloc: integer overflow in gntalloc_ioctl_alloc()
  xen-gntdev: integer overflow in gntdev_alloc_map()
  xen:pvhvm: enable PVHVM VCPU placement when using more than 32 CPUs.
  xen/balloon: Avoid OOM when requesting highmem
  xen: Remove hanging references to CONFIG_XEN_PLATFORM_PCI
  xen: map foreign pages for shared rings by updating the PTEs directly
Linus Torvalds 13 years ago
parent commit b684452383

+ 1 - 2
arch/x86/xen/enlighten.c

@@ -1356,7 +1356,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
 	int cpu = (long)hcpu;
 	switch (action) {
 	case CPU_UP_PREPARE:
-		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+		xen_vcpu_setup(cpu);
 		if (xen_have_vector_callback)
 			xen_init_lock_cpu(cpu);
 		break;
@@ -1386,7 +1386,6 @@ static void __init xen_hvm_guest_init(void)
 	xen_hvm_smp_init();
 	register_cpu_notifier(&xen_hvm_cpu_notifier);
 	xen_unplug_emulated_devices();
-	have_vcpu_info_placement = 0;
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
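
For context: the first hunk makes CPU hotplug go through xen_vcpu_setup(), which can register per-CPU vcpu_info with the hypervisor, instead of pointing each CPU at the fixed vcpu_info array in HYPERVISOR_shared_info; that array only has 32 slots, which is why PVHVM VCPU placement previously had to be disabled (the second hunk drops that workaround). A minimal sketch of the 3.2-era hotplug-notifier pattern the code uses, with setup_my_percpu_state() as a hypothetical stand-in for the real per-CPU work:

	/* Illustrative sketch only, not the kernel's exact code. */
	static int __cpuinit my_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
	{
		int cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* Runs before the CPU comes online. */
			setup_my_percpu_state(cpu);	/* hypothetical helper */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier __cpuinitdata = {
		.notifier_call = my_cpu_notify,
	};

	/* Registered once at boot: register_cpu_notifier(&my_cpu_notifier); */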

+ 1 - 1
arch/x86/xen/grant-table.c

@@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 
 	if (shared == NULL) {
 		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes);
+			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
 		BUG_ON(area == NULL);
 		shared = area->addr;
 		*__shared = shared;

+ 2 - 2
drivers/xen/balloon.c

@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
  * alloc_xenballooned_pages - get pages that have been ballooned out
  * @nr_pages: Number of pages to get
  * @pages: pages returned
- * @highmem: highmem or lowmem pages
+ * @highmem: allow highmem pages
  * @return 0 on success, error otherwise
  */
 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
 	mutex_lock(&balloon_mutex);
 	while (pgno < nr_pages) {
 		page = balloon_retrieve(highmem);
-		if (page && PageHighMem(page) == highmem) {
+		if (page && (highmem || !PageHighMem(page))) {
 			pages[pgno++] = page;
 		} else {
 			enum bp_state st;
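
The balloon change is easiest to read as a permission check. The old test, PageHighMem(page) == highmem, rejected lowmem pages whenever highmem was allowed, discarding usable pages and pushing the allocator toward OOM; the new test treats @highmem as "highmem permitted", not "highmem required". A standalone restatement (illustrative, not kernel code):

	#include <stdbool.h>

	/* Restates the accept test from the hunk above. */
	static bool accept_page(bool page_is_highmem, bool highmem_allowed)
	{
		/* Old: page_is_highmem == highmem_allowed
		 *      -> rejects a usable lowmem page when highmem is allowed. */

		/* New: lowmem always works; highmem only when permitted. */
		return highmem_allowed || !page_is_highmem;
	}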

+ 2 - 2
drivers/xen/gntalloc.c

@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 		/* Grant foreign access to the page. */
 		gref->gref_id = gnttab_grant_foreign_access(op->domid,
 			pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-		if (gref->gref_id < 0) {
+		if ((int)gref->gref_id < 0) {
 			rc = gref->gref_id;
 			goto undo;
 		}
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
 		goto out;
 	}
 
-	gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+	gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
 	if (!gref_ids) {
 		rc = -ENOMEM;
 		goto out;
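
The cast in add_grefs() matters because gref_id is a grant reference, an unsigned type, while gnttab_grant_foreign_access() reports failure as a negative int; stored into the unsigned field, a negative errno becomes a huge positive value and a plain gref->gref_id < 0 can never be true. A userspace illustration of the pitfall:

	#include <errno.h>
	#include <stdio.h>

	int main(void)
	{
		/* A negative errno smuggled through an unsigned variable. */
		unsigned int id = (unsigned int)-EINVAL;

		if (id < 0)		/* always false: unsigned is never < 0 */
			puts("never reached");
		if ((int)id < 0)	/* the fix: compare as signed */
			printf("error %d detected\n", (int)id);
		return 0;
	}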

+ 5 - 5
drivers/xen/gntdev.c

@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 	if (NULL == add)
 		return NULL;
 
-	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
-	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
-	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
-	add->kmap_ops  = kzalloc(sizeof(add->kmap_ops[0])  * count, GFP_KERNEL);
-	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
+	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 	if (NULL == add->grants    ||
 	    NULL == add->map_ops   ||
 	    NULL == add->unmap_ops ||
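
Both kcalloc() conversions close the same hole: count (and op.count above) comes from an ioctl, so on a 32-bit kernel a large user-supplied value can make sizeof(elem) * count wrap, and kzalloc() then returns a buffer far smaller than the code assumes. kcalloc() checks the multiplication and fails cleanly instead. A userspace sketch of the two behaviors:

	#include <stddef.h>
	#include <stdint.h>

	/* What kzalloc(sizeof(e) * count, ...) computed: wraps silently. */
	static size_t unchecked_bytes(size_t count, size_t elem)
	{
		return count * elem;
	}

	/* The guard kcalloc() applies: refuse instead of wrapping. */
	static int checked_bytes(size_t count, size_t elem, size_t *out)
	{
		if (elem != 0 && count > SIZE_MAX / elem)
			return -1;	/* kcalloc returns NULL here */
		*out = count * elem;
		return 0;
	}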

+ 8 - 3
drivers/xen/xenbus/xenbus_client.c

@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 {
 	struct gnttab_map_grant_ref op = {
-		.flags = GNTMAP_host_map,
+		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
 		.ref   = gnt_ref,
 		.dom   = dev->otherend_id,
 	};
 	struct vm_struct *area;
+	pte_t *pte;
 
 	*vaddr = NULL;
 
-	area = alloc_vm_area(PAGE_SIZE);
+	area = alloc_vm_area(PAGE_SIZE, &pte);
 	if (!area)
 		return -ENOMEM;
 
-	op.host_addr = (unsigned long)area->addr;
+	op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
 		BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 	struct gnttab_unmap_grant_ref op = {
 		.host_addr = (unsigned long)vaddr,
 	};
+	unsigned int level;
 
 	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
 	 * method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 	}
 
 	op.handle = (grant_handle_t)area->phys_addr;
+	op.host_addr = arbitrary_virt_to_machine(
+		lookup_address((unsigned long)vaddr, &level)).maddr;
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
 		BUG();
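
Condensing the xenbus hunks: with GNTMAP_contains_pte set, op.host_addr no longer carries the ring's virtual address but the machine address of the PTE that should receive the mapping, so Xen updates the page table directly and the kernel never has to fault the mapping in (this is what lets vmalloc_sync_all() go away, below). The map path with error handling stripped:

	pte_t *pte;
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, &pte);

	op.flags     = GNTMAP_host_map | GNTMAP_contains_pte;
	op.host_addr = arbitrary_virt_to_machine(pte).maddr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();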

+ 1 - 1
include/linux/vmalloc.h

@@ -119,7 +119,7 @@ unmap_kernel_range(unsigned long addr, unsigned long size)
 #endif
 
 /* Allocate/destroy a 'vmalloc' VM area. */
-extern struct vm_struct *alloc_vm_area(size_t size);
+extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
 extern void free_vm_area(struct vm_struct *area);
 
 /* for /dev/kmem */
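
Both call styles under the new prototype appear in this series: grant-table.c passes NULL because it only needs the virtual range, while xenbus_client.c passes the address of a pte_t * to capture the single page's PTE:

	/* PTEs not needed: */
	struct vm_struct *shared_area =
		alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);

	/* One page, one PTE slot: */
	pte_t *pte;
	struct vm_struct *ring_area = alloc_vm_area(PAGE_SIZE, &pte);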

+ 2 - 4
include/xen/platform_pci.h

@@ -29,8 +29,7 @@
 static inline int xen_must_unplug_nics(void) {
 #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
 		defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \
-		(defined(CONFIG_XEN_PLATFORM_PCI) || \
-		 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
+		defined(CONFIG_XEN_PVHVM)
         return 1;
 #else
         return 0;
@@ -40,8 +39,7 @@ static inline int xen_must_unplug_nics(void) {
 static inline int xen_must_unplug_disks(void) {
 #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \
 		defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \
-		(defined(CONFIG_XEN_PLATFORM_PCI) || \
-		 defined(CONFIG_XEN_PLATFORM_PCI_MODULE))
+		defined(CONFIG_XEN_PVHVM)
         return 1;
 #else
         return 0;

+ 1 - 1
mm/nommu.c

@@ -454,7 +454,7 @@ void  __attribute__((weak)) vmalloc_sync_all(void)
  *	between processes, it syncs the pagetable across all
  *	processes.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	BUG();
 	return NULL;

+ 13 - 14
mm/vmalloc.c

@@ -2141,23 +2141,30 @@ void  __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-	/* apply_to_page_range() does all the hard work. */
+	pte_t ***p = data;
+
+	if (p) {
+		*(*p) = pte;
+		(*p)++;
+	}
 	return 0;
 }
 
 /**
  *	alloc_vm_area - allocate a range of kernel address space
  *	@size:		size of the area
+ *	@ptes:		returns the PTEs for the address space
  *
  *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
- *	are created.  If the kernel address space is not shared
- *	between processes, it syncs the pagetable across all
- *	processes.
+ *	are created.
+ *
+ *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ *	allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	struct vm_struct *area;
 
@@ -2171,19 +2178,11 @@ struct vm_struct *alloc_vm_area(size_t size)
 	 * of kernel virtual address space and mapped into init_mm.
 	 */
 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				area->size, f, NULL)) {
+				size, f, ptes ? &ptes : NULL)) {
 		free_vm_area(area);
 		return NULL;
 	}
 
-	/*
-	 * If the allocated address space is passed to a hypercall
-	 * before being used then we cannot rely on a page fault to
-	 * trigger an update of the page tables.  So sync all the page
-	 * tables here.
-	 */
-	vmalloc_sync_all();
-
 	return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
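
With the PTEs handed back to callers (and the Xen ring mappings installed by hypercall), the fault-time sync is unnecessary, so vmalloc_sync_all() is dropped. Under the new contract the caller supplies one pte_t * slot per page and f() fills them in page order as apply_to_page_range() walks the area; a hedged usage sketch for a multi-page area (no caller in this series does this, so treat it as an assumption about the interface):

	pte_t *ptes[4];			/* one slot per page */
	struct vm_struct *area = alloc_vm_area(4 * PAGE_SIZE, ptes);

	if (!area)
		return -ENOMEM;
	/* ptes[i] now points at the init_mm PTE backing page i of area->addr. */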