Merge remote branch 'origin/master' of ../linux-2.6 into drm-next

This was a non-trivial merge with some patches sent to Linus
in drm-fixes.

Conflicts:
	drivers/gpu/drm/radeon/r300.c
	drivers/gpu/drm/radeon/radeon_asic.h
	drivers/gpu/drm/radeon/rs600.c
	drivers/gpu/drm/radeon/rs690.c
	drivers/gpu/drm/radeon/rv515.c
Dave Airlie, 15 years ago
parent commit cc84ef3fd2
66 changed files with 578 additions and 353 deletions
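
For context, a minimal reconstruction of the workflow that produces a merge commit like this one (the branch names and the ../linux-2.6 path are taken from the message above; the exact commands are an assumption, not recorded in the commit):

    $ git checkout drm-next
    $ git pull ../linux-2.6 origin/master    # stops with conflicts in the radeon files
    # resolve each conflicted file by hand, then mark it resolved
    $ git add drivers/gpu/drm/radeon/r300.c drivers/gpu/drm/radeon/radeon_asic.h \
              drivers/gpu/drm/radeon/rs600.c drivers/gpu/drm/radeon/rs690.c \
              drivers/gpu/drm/radeon/rv515.c
    $ git commit    # records the merge with the message shown above
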
  1. Makefile (+1 -1)
  2. arch/ia64/kernel/dma-mapping.c (+3 -1)
  3. arch/ia64/lib/ip_fast_csum.S (+5 -3)
  4. arch/powerpc/kernel/power7-pmu.c (+3 -3)
  5. arch/powerpc/sysdev/xilinx_intc.c (+0 -1)
  6. arch/sparc/kernel/irq_64.c (+1 -1)
  7. arch/sparc/kernel/nmi.c (+1 -1)
  8. arch/sparc/prom/misc_64.c (+1 -1)
  9. arch/sparc/prom/printf.c (+3 -4)
  10. block/blk-sysfs.c (+1 -1)
  11. crypto/algapi.c (+9 -2)
  12. drivers/char/n_tty.c (+1 -2)
  13. drivers/char/pty.c (+1 -9)
  14. drivers/cpufreq/cpufreq.c (+7 -88)
  15. drivers/firewire/core-iso.c (+2 -2)
  16. drivers/firewire/ohci.c (+14 -0)
  17. drivers/firewire/sbp2.c (+4 -4)
  18. drivers/ide/ide-cs.c (+1 -0)
  19. drivers/input/keyboard/atkbd.c (+35 -0)
  20. drivers/input/serio/i8042-x86ia64io.h (+8 -0)
  21. drivers/md/dm-exception-store.c (+13 -0)
  22. drivers/md/dm-exception-store.h (+4 -0)
  23. drivers/md/dm-log-userspace-base.c (+24 -15)
  24. drivers/md/dm-log-userspace-transfer.c (+4 -2)
  25. drivers/md/dm-log-userspace-transfer.h (+1 -1)
  26. drivers/md/dm-raid1.c (+7 -1)
  27. drivers/md/dm-snap-persistent.c (+53 -35)
  28. drivers/md/dm-snap.c (+21 -2)
  29. drivers/md/dm-stripe.c (+12 -1)
  30. drivers/md/dm-table.c (+33 -18)
  31. drivers/md/dm.c (+10 -5)
  32. drivers/mtd/devices/m25p80.c (+1 -1)
  33. drivers/mtd/nftlcore.c (+9 -6)
  34. drivers/net/gianfar.c (+1 -0)
  35. drivers/net/wireless/ipw2x00/ipw2200.c (+67 -53)
  36. drivers/pci/iov.c (+23 -0)
  37. drivers/pci/pci.h (+13 -0)
  38. drivers/pci/setup-bus.c (+2 -2)
  39. drivers/pci/setup-res.c (+4 -4)
  40. fs/compat.c (+4 -13)
  41. fs/exec.c (+38 -25)
  42. fs/ext2/namei.c (+4 -0)
  43. fs/jffs2/wbuf.c (+10 -0)
  44. fs/namei.c (+15 -7)
  45. fs/nilfs2/btnode.c (+1 -1)
  46. fs/ocfs2/aops.c (+2 -2)
  47. fs/ocfs2/dcache.c (+11 -0)
  48. fs/xfs/linux-2.6/xfs_ioctl32.c (+1 -1)
  49. include/crypto/algapi.h (+1 -0)
  50. include/crypto/internal/skcipher.h (+2 -2)
  51. include/linux/binfmts.h (+1 -0)
  52. include/linux/device-mapper.h (+4 -0)
  53. include/linux/dm-log-userspace.h (+12 -1)
  54. include/linux/workqueue.h (+15 -0)
  55. include/net/pkt_sched.h (+2 -2)
  56. kernel/perf_counter.c (+2 -1)
  57. mm/nommu.c (+1 -2)
  58. mm/page_alloc.c (+4 -2)
  59. mm/percpu.c (+14 -1)
  60. mm/slub.c (+2 -2)
  61. net/core/sock.c (+1 -1)
  62. net/sched/sch_api.c (+7 -5)
  63. net/sched/sch_cbq.c (+11 -14)
  64. security/integrity/ima/ima_main.c (+5 -1)
  65. sound/pci/oxygen/oxygen_lib.c (+3 -0)
  66. sound/pci/oxygen/oxygen_pcm.c (+2 -0)

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc9
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

+ 3 - 1
arch/ia64/kernel/dma-mapping.c

@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
 
 static int __init dma_init(void)
 {
-       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
 }
 fs_initcall(dma_init);
 

+ 5 - 3
arch/ia64/lib/ip_fast_csum.S

@@ -96,20 +96,22 @@ END(ip_fast_csum)
 GLOBAL_ENTRY(csum_ipv6_magic)
 	ld4	r20=[in0],4
 	ld4	r21=[in1],4
-	dep	r15=in3,in2,32,16
+	zxt4	in2=in2
 	;;
 	ld4	r22=[in0],4
 	ld4	r23=[in1],4
-	mux1	r15=r15,@rev
+	dep	r15=in3,in2,32,16
 	;;
 	ld4	r24=[in0],4
 	ld4	r25=[in1],4
-	shr.u	r15=r15,16
+	mux1	r15=r15,@rev
 	add	r16=r20,r21
 	add	r17=r22,r23
+	zxt4	in4=in4
 	;;
 	ld4	r26=[in0],4
 	ld4	r27=[in1],4
+	shr.u	r15=r15,16
 	add	r18=r24,r25
 	add	r8=r16,r17
 	;;

+ 3 - 3
arch/powerpc/kernel/power7-pmu.c

@@ -317,7 +317,7 @@ static int power7_generic_events[] = {
  */
 static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 	[C(L1D)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
-		[C(OP_READ)] = {	0x400f0,	0xc880	},
+		[C(OP_READ)] = {	0xc880,		0x400f0	},
 		[C(OP_WRITE)] = {	0,		0x300f0	},
 		[C(OP_PREFETCH)] = {	0xd8b8,		0	},
 	},
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 		[C(OP_PREFETCH)] = {	0x408a,		0	},
 	},
 	[C(LL)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
-		[C(OP_READ)] = {	0x6080,		0x6084	},
-		[C(OP_WRITE)] = {	0x6082,		0x6086	},
+		[C(OP_READ)] = {	0x16080,	0x26080	},
+		[C(OP_WRITE)] = {	0x16082,	0x26082	},
 		[C(OP_PREFETCH)] = {	0,		0	},
 	},
 	[C(DTLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */

+ 0 - 1
arch/powerpc/sysdev/xilinx_intc.c

@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
 		generic_handle_irq(cascade_irq);
 
 	/* Let xilinx_intc end the interrupt */
-	desc->chip->ack(irq);
 	desc->chip->unmask(irq);
 }
 

+ 1 - 1
arch/sparc/kernel/irq_64.c

@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
 	unsigned long num_entries = (qmask + 1) / 64;
 	unsigned long status;

+ 1 - 1
arch/sparc/kernel/nmi.c

@@ -103,7 +103,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {

+ 1 - 1
arch/sparc/prom/misc_64.c

@@ -88,7 +88,7 @@ void prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void prom_halt(void)
+void notrace prom_halt(void)
 {
 #ifdef CONFIG_SUN_LDOMS
 	if (ldom_domaining_enabled)

+ 3 - 4
arch/sparc/prom/printf.c

@@ -14,14 +14,14 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 static char ppbuf[1024];
 
-void
-prom_write(const char *buf, unsigned int n)
+void notrace prom_write(const char *buf, unsigned int n)
 {
 	char ch;
 
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
 	}
 }
 
-void
-prom_printf(const char *fmt, ...)
+void notrace prom_printf(const char *fmt, ...)
 {
 	va_list args;
 	int i;

+ 1 - 1
block/blk-sysfs.c

@@ -133,7 +133,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	blk_queue_max_sectors(q, max_sectors_kb << 1);
+	q->limits.max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;

+ 9 - 2
crypto/algapi.c

@@ -692,7 +692,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
 {
 	struct list_head *request;
 
@@ -707,7 +707,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
 	request = queue->list.next;
 	list_del(request);
 
-	return list_entry(request, struct crypto_async_request, list);
+	return (char *)list_entry(request, struct crypto_async_request, list) -
+	       offset;
+}
+EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
+
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+{
+	return __crypto_dequeue_request(queue, 0);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
 

+ 1 - 2
drivers/char/n_tty.c

@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
 			if (space < 2)
 				return -1;
 			tty->canon_column = tty->column = 0;
-			tty_put_char(tty, '\r');
-			tty_put_char(tty, c);
+			tty->ops->write(tty, "\r\n", 2);
 			return 2;
 		}
 		tty->canon_column = tty->column;

+ 1 - 9
drivers/char/pty.c

@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
  *	the other side of the pty/tty pair.
  */
 
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
-								int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
 	struct tty_struct *to = tty->link;
-	int c;
 
 	if (tty->stopped)
 		return 0;
 
-	/* This isn't locked but our 8K is quite sloppy so no
-	   big deal */
-
-	c = pty_space(to);
-	if (c > count)
-		c = count;
 	if (c > 0) {
 		/* Stuff the data into the input queue of the other end */
 		c = tty_insert_flip_string(to, buf, c);

+ 7 - 88
drivers/cpufreq/cpufreq.c

@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
-	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("suspending cpu %u\n", cpu);
 
-	/*
-	 * This whole bogosity is here because Powerbooks are made of fail.
-	 * No sane platform should need any of the code below to be run.
-	 * (it's entirely the wrong thing to do, as driver->get may
-	 *  reenable interrupts on some architectures).
-	 */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
 	if (cpufreq_driver->suspend) {
 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
-		if (ret) {
+		if (ret)
 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
 					"step on CPU %u\n", cpu_policy->cpu);
-			goto out;
-		}
-	}
-
-	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
-		goto out;
-
-	if (cpufreq_driver->get)
-		cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-	if (!cur_freq || !cpu_policy->cur) {
-		printk(KERN_ERR "cpufreq: suspend failed to assert current "
-		       "frequency is what timing core thinks it is.\n");
-		goto out;
-	}
-
-	if (unlikely(cur_freq != cpu_policy->cur)) {
-		struct cpufreq_freqs freqs;
-
-		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-			dprintk("Warning: CPU frequency is %u, "
-			       "cpufreq assumed %u kHz.\n",
-			       cur_freq, cpu_policy->cur);
-
-		freqs.cpu = cpu;
-		freqs.old = cpu_policy->cur;
-		freqs.new = cur_freq;
-
-		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-				    CPUFREQ_SUSPENDCHANGE, &freqs);
-		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
-		cpu_policy->cur = cur_freq;
 	}
 
 out:
 	cpufreq_cpu_put(cpu_policy);
-#endif	/* __powerpc__ */
 	return ret;
 }
 
@@ -1330,24 +1287,21 @@ out:
  *	cpufreq_resume -  restore proper CPU frequency handling after resume
  *
  *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- *	3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored.
+ *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ *	    restored. It will verify that the current freq is in sync with
+ *	    what we believe it to be. This is a bit later than when it
+ *	    should be, but nonethteless it's better than calling
+ *	    cpufreq_driver->get() here which might re-enable interrupts...
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
 
-	/* As with the ->suspend method, all the code below is
-	 * only necessary because Powerbooks suck.
-	 * See commit 42d4dc3f4e1e for jokes. */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
 		}
 	}
 
-	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-		unsigned int cur_freq = 0;
-
-		if (cpufreq_driver->get)
-			cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-		if (!cur_freq || !cpu_policy->cur) {
-			printk(KERN_ERR "cpufreq: resume failed to assert "
-					"current frequency is what timing core "
-					"thinks it is.\n");
-			goto out;
-		}
-
-		if (unlikely(cur_freq != cpu_policy->cur)) {
-			struct cpufreq_freqs freqs;
-
-			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-				dprintk("Warning: CPU frequency "
-				       "is %u, cpufreq assumed %u kHz.\n",
-				       cur_freq, cpu_policy->cur);
-
-			freqs.cpu = cpu;
-			freqs.old = cpu_policy->cur;
-			freqs.new = cur_freq;
-
-			srcu_notifier_call_chain(
-					&cpufreq_transition_notifier_list,
-					CPUFREQ_RESUMECHANGE, &freqs);
-			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
-			cpu_policy->cur = cur_freq;
-		}
-	}
-
-out:
 	schedule_work(&cpu_policy->update);
+
 fail:
 	cpufreq_cpu_put(cpu_policy);
-#endif	/* __powerpc__ */
 	return ret;
 }
 

+ 2 - 2
drivers/firewire/core-iso.c

@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
 				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
-				data, sizeof(data))) {
+				data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all bandwidth. */
 			return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 		data[1] = old ^ c;
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 					   irm_id, generation, SCODE_100,
-					   offset, data, sizeof(data))) {
+					   offset, data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all channels. */
 			return allocate ? -EAGAIN : i;

+ 14 - 0
drivers/firewire/ohci.c

@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/pci_ids.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
+#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
+#define PCI_DEVICE_ID_AGERE_FW643	0x5901
+
 static int __devinit pci_probe(struct pci_dev *dev,
 			       const struct pci_device_id *ent)
 {
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
 
+	/* dual-buffer mode is broken if more than one IR context is active */
+	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
+	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
+		ohci->use_dualbuffer = false;
+
+	/* dual-buffer mode is broken */
+	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
+	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
+		ohci->use_dualbuffer = false;
+
 /* x86-32 currently doesn't use highmem for dma_alloc_coherent */
 #if !defined(CONFIG_X86_32)
 	/* dual-buffer mode is broken with descriptor addresses above 2G */

+ 4 - 4
drivers/firewire/sbp2.c

@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (&orb->link != &lu->orb_list)
+	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
-	else
+		kref_put(&orb->kref, free_orb);
+	} else {
 		fw_error("status write for unknown orb\n");
-
-	kref_put(&orb->kref, free_orb);
+	}
 
 	fw_send_response(card, request, RCODE_COMPLETE);
 }

+ 1 - 0
drivers/ide/ide-cs.c

@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
 	PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("CNF   ", "CD-ROM", 0x46d7db81, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
 	PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),

+ 35 - 0
drivers/input/keyboard/atkbd.c

@@ -879,6 +879,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
 	0xae, 0xb0, -1U
 };
 
+/*
+ * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
+ * release for their volume buttons
+ */
+static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
+	0xae, 0xb0, -1U
+};
+
 /*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */
@@ -1536,6 +1544,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 		.callback = atkbd_setup_forced_release,
 		.driver_data = atkbd_hp_zv6100_forced_release_keys,
 	},
+	{
+		.ident = "HP Presario R4000",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
+	{
+		.ident = "HP Presario R4100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
+	{
+		.ident = "HP Presario R4200",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
 	{
 		.ident = "Inventec Symphony",
 		.matches = {

+ 8 - 0
drivers/input/serio/i8042-x86ia64io.h

@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
 		},
 	},
+	{
+		.ident = "Acer Aspire 5536",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+		},
+	},
 	{ }
 };
 

+ 13 - 0
drivers/md/dm-exception-store.c

@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
 	 */
 	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
 
+	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+						 error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error)
+{
 	/* Check chunk_size is a power of 2 */
 	if (!is_power_of_2(chunk_size_ulong)) {
 		*error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return -EINVAL;
 	}
 
+	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+		*error = "Chunk size is too high";
+		return -EINVAL;
+	}
+
 	store->chunk_size = chunk_size_ulong;
 	store->chunk_mask = chunk_size_ulong - 1;
 	store->chunk_shift = ffs(chunk_size_ulong) - 1;

+ 4 - 0
drivers/md/dm-exception-store.h

@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error);
+
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 			      unsigned *args_used,
 			      struct dm_exception_store **store);

+ 24 - 15
drivers/md/dm-log-userspace-base.c

@@ -21,6 +21,7 @@ struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
 	region_t region_count;
+	uint64_t luid;
 	char uuid[DM_UUID_LEN];
 
 	char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
 	 * restored.
 	 */
 retry:
-	r = dm_consult_userspace(uuid, request_type, data,
+	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
 				 data_size, rdata, rdata_size);
 
 	if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(2*HZ);
 		DMWARN("Attempting to contact userspace log server...");
-		r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+					 lc->usr_argv_str,
 					 strlen(lc->usr_argv_str) + 1,
 					 NULL, NULL);
 		if (!r)
 			break;
 	}
 	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
-	r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
 				 0, NULL, NULL);
 	if (!r)
 		goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
 		return -ENOMEM;
 	}
 
-	for (i = 0, str_size = 0; i < argc; i++)
-		str_size += sprintf(str + str_size, "%s ", argv[i]);
-	str_size += sprintf(str + str_size, "%llu",
-			    (unsigned long long)ti->len);
+	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+	for (i = 0; i < argc; i++)
+		str_size += sprintf(str + str_size, " %s", argv[i]);
 
 	*ctr_str = str;
 	return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		return -ENOMEM;
 	}
 
+	/* The ptr value is sufficient for local unique id */
+	lc->luid = (uint64_t)lc;
+
 	lc->ti = ti;
 
 	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* Send table string */
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);
 
 	if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 
 	/* Since the region size does not change, get it now */
 	rdata_size = sizeof(rdata);
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
 				 NULL, 0, (char *)&rdata, &rdata_size);
 
 	if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
 	struct log_c *lc = log->context;
 
 	lc->in_sync_hint = 0;
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 			    char *result, unsigned maxlen)
 {
 	int r = 0;
+	char *table_args;
 	size_t sz = (size_t)maxlen;
 	struct log_c *lc = log->context;
 
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 		break;
 	case STATUSTYPE_TABLE:
 		sz = 0;
-		DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
-		       lc->uuid, lc->usr_argv_str);
+		table_args = strstr(lc->usr_argv_str, " ");
+		BUG_ON(!table_args); /* There will always be a ' ' */
+		table_args++;
+
+		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+		       lc->uuid, table_args);
 		break;
 	}
 	return (r) ? 0 : (int)sz;

+ 4 - 2
drivers/md/dm-log-userspace-transfer.c

@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
 
 /**
 * dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
 * @request_type:  found in include/linux/dm-log-userspace.h
 * @data: data to tx to the server
 * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
 *
 * Returns: 0 on success, -EXXX on failure
 **/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size)
 {
@@ -190,6 +191,7 @@ resend:
 
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
 
 	/*

+ 1 - 1
drivers/md/dm-log-userspace-transfer.h

@@ -11,7 +11,7 @@
 
 int dm_ulog_tfr_init(void);
 void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size);
 

+ 7 - 1
drivers/md/dm-raid1.c

@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	 */
 	dm_rh_inc_pending(ms->rh, &sync);
 	dm_rh_inc_pending(ms->rh, &nosync);
-	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+	/*
+	 * If the flush fails on a previous call and succeeds here,
+	 * we must not reset the log_failure variable.  We need
+	 * userspace interaction to do that.
+	 */
+	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 
 	/*
 	 * Dispatch io.

+ 53 - 35
drivers/md/dm-snap-persistent.c

@@ -105,6 +105,13 @@ struct pstore {
 	 */
 	void *zero_area;
 
+	/*
+	 * An area used for header. The header can be written
+	 * concurrently with metadata (when invalidating the snapshot),
+	 * so it needs a separate buffer.
+	 */
+	void *header_area;
+
 	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
 	 */
 	ps->area = vmalloc(len);
 	if (!ps->area)
-		return r;
+		goto err_area;
 
 	ps->zero_area = vmalloc(len);
-	if (!ps->zero_area) {
-		vfree(ps->area);
-		return r;
-	}
+	if (!ps->zero_area)
+		goto err_zero_area;
 	memset(ps->zero_area, 0, len);
 
+	ps->header_area = vmalloc(len);
+	if (!ps->header_area)
+		goto err_header_area;
+
 	return 0;
+
+err_header_area:
+	vfree(ps->zero_area);
+
+err_zero_area:
+	vfree(ps->area);
+
+err_area:
+	return r;
 }
 
 static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
 	if (ps->zero_area)
 		vfree(ps->zero_area);
 	ps->zero_area = NULL;
+
+	if (ps->header_area)
+		vfree(ps->header_area);
+	ps->header_area = NULL;
 }
 
 struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
 /*
 * Read or write a chunk aligned and sized block of data from a device.
 */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+		    int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 	struct dm_io_request io_req = {
 		.bi_rw = rw,
 		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->area,
+		.mem.ptr.vma = area,
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
 
 	chunk = area_location(ps, ps->current_area);
 
-	r = chunk_io(ps, chunk, rw, 0);
+	r = chunk_io(ps, ps->area, chunk, rw, 0);
 	if (r)
 		return r;
 
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-	struct dm_io_region where = {
-		.bdev = ps->store->cow->bdev,
-		.sector = ps->store->chunk_size * area_location(ps, area),
-		.count = ps->store->chunk_size,
-	};
-	struct dm_io_request io_req = {
-		.bi_rw = WRITE,
-		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->zero_area,
-		.client = ps->io_client,
-		.notify.fn = NULL,
-	};
-
-	return dm_io(&io_req, 1, &where, NULL);
+	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	struct disk_header *dh;
 	chunk_t chunk_size;
 	int chunk_size_supplied = 1;
+	char *chunk_err;
 
 	/*
 	 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;
 
-	r = chunk_io(ps, 0, READ, 1);
+	r = chunk_io(ps, ps->header_area, 0, READ, 1);
 	if (r)
 		goto bad;
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 
 	if (le32_to_cpu(dh->magic) == 0) {
 		*new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	ps->version = le32_to_cpu(dh->version);
 	chunk_size = le32_to_cpu(dh->chunk_size);
 
-	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+	if (ps->store->chunk_size == chunk_size)
 		return 0;
 
-	DMWARN("chunk size %llu in device metadata overrides "
-	       "table chunk size of %llu.",
-	       (unsigned long long)chunk_size,
-	       (unsigned long long)ps->store->chunk_size);
+	if (chunk_size_supplied)
+		DMWARN("chunk size %llu in device metadata overrides "
+		       "table chunk size of %llu.",
+		       (unsigned long long)chunk_size,
+		       (unsigned long long)ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
 
-	ps->store->chunk_size = chunk_size;
-	ps->store->chunk_mask = chunk_size - 1;
-	ps->store->chunk_shift = ffs(chunk_size) - 1;
+	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+					      &chunk_err);
+	if (r) {
+		DMERR("invalid on-disk chunk size %llu: %s.",
+		      (unsigned long long)chunk_size, chunk_err);
+		return r;
+	}
 
 	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
 				ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
 {
 	struct disk_header *dh;
 
-	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 	dh->magic = cpu_to_le32(SNAP_MAGIC);
 	dh->valid = cpu_to_le32(ps->valid);
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-	return chunk_io(ps, 0, WRITE, 1);
+	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
 
 /*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
 	ps->valid = 1;
 	ps->version = SNAPSHOT_DISK_VERSION;
 	ps->area = NULL;
+	ps->zero_area = NULL;
+	ps->header_area = NULL;
 	ps->next_free = 2;	/* skipping the header and first area */
 	ps->current_committed = 0;
 

+ 21 - 2
drivers/md/dm-snap.c

@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	return 0;
 }
 
+static int snapshot_iterate_devices(struct dm_target *ti,
+				    iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_snapshot *snap = ti->private;
+
+	return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
 /*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 	return 0;
 }
 
+static int origin_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_dev *dev = ti->private;
+
+	return fn(ti, dev, 0, ti->len, data);
+}
+
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
 	.status  = origin_status,
+	.iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
 	.end_io  = snapshot_end_io,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
+	.iterate_devices = snapshot_iterate_devices,
 };
 
 static int __init dm_snapshot_init(void)

+ 12 - 1
drivers/md/dm-stripe.c

@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
 	return ret;
 }
 
+static void stripe_io_hints(struct dm_target *ti,
+			    struct queue_limits *limits)
+{
+	struct stripe_c *sc = ti->private;
+	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+	blk_limits_io_min(limits, chunk_size);
+	limits->io_opt = chunk_size * sc->stripes;
+}
+
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
 	.end_io = stripe_end_io,
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
+	.io_hints = stripe_io_hints,
 };
 
 int __init dm_stripe_init(void)

+ 33 - 18
drivers/md/dm-table.c

@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 }
 
 /*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
 */
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-				sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
 	char b[BDEVNAME_SIZE];
 
 	if (!dev_size)
-		return 1;
+		return 0;
 
 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %s too small for target",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
-		return 0;
+		DMWARN("%s: %s too small for target: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
 	}
 
 	if (logical_block_size_sectors <= 1)
-		return 1;
+		return 0;
 
 	if (start & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)start,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
 	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)len,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 /*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	}
 
 	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		DMWARN("%s: target device %s is misaligned: "
+		       "physical_block_size=%u, logical_block_size=%u, "
+		       "alignment_offset=%u, start=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       q->limits.physical_block_size,
+		       q->limits.logical_block_size,
+		       q->limits.alignment_offset,
+		       (unsigned long long) start << 9);
+
 
 	/*
 	 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 
 	if (remaining) {
 		DMWARN("%s: table line %u (start sect %llu len %llu) "
-		       "not aligned to h/w logical block size %hu",
+		       "not aligned to h/w logical block size %u",
 		       dm_device_name(table->md), i,
 		       (unsigned long long) ti->begin,
 		       (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
 		ti->type->iterate_devices(ti, dm_set_device_limits,
 					  &ti_limits);
 
+		/* Set I/O hints portion of queue limits */
+		if (ti->type->io_hints)
+			ti->type->io_hints(ti, &ti_limits);
+
 		/*
 		 * Check each device area is consistent with the target's
 		 * overall queue limits.
 		 */
-		if (!ti->type->iterate_devices(ti, device_area_is_valid,
-					       &ti_limits))
+		if (ti->type->iterate_devices(ti, device_area_is_invalid,
+					      &ti_limits))
 			return -EINVAL;
 
 combine_limits:

+ 10 - 5
drivers/md/dm.c

@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
 	dm_put(md);
 }
 
+static void free_rq_clone(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	blk_rq_unprep_clone(clone);
+	free_rq_tio(tio);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
 	struct request *clone = rq->special;
-	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
 
-	blk_rq_unprep_clone(clone);
-	free_rq_tio(tio);
+	free_rq_clone(clone);
 }
 
 /*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	BUG_ON(clone->bio);
-	free_rq_tio(tio);
+	free_rq_clone(clone);
 
 	blk_end_request_all(rq, error);
 

+ 1 - 1
drivers/mtd/devices/m25p80.c

@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 			flash->partitioned = 1;
 			return add_mtd_partitions(&flash->mtd, parts, nr_parts);
 		}
-	} else if (data->nr_parts)
+	} else if (data && data->nr_parts)
 		dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
 				data->nr_parts, data->name);
 

+ 9 - 6
drivers/mtd/nftlcore.c

@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
 int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
 		  size_t *retlen, uint8_t *buf)
 {
+	loff_t mask = mtd->writesize - 1;
 	struct mtd_oob_ops ops;
 	int res;
 
 	ops.mode = MTD_OOB_PLACE;
-	ops.ooboffs = offs & (mtd->writesize - 1);
+	ops.ooboffs = offs & mask;
 	ops.ooblen = len;
 	ops.oobbuf = buf;
 	ops.datbuf = NULL;
 
-	res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+	res = mtd->read_oob(mtd, offs & ~mask, &ops);
 	*retlen = ops.oobretlen;
 	return res;
 }
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
 int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
 		   size_t *retlen, uint8_t *buf)
 {
+	loff_t mask = mtd->writesize - 1;
 	struct mtd_oob_ops ops;
 	int res;
 
 	ops.mode = MTD_OOB_PLACE;
-	ops.ooboffs = offs & (mtd->writesize - 1);
+	ops.ooboffs = offs & mask;
 	ops.ooblen = len;
 	ops.oobbuf = buf;
 	ops.datbuf = NULL;
 
-	res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+	res = mtd->write_oob(mtd, offs & ~mask, &ops);
 	*retlen = ops.oobretlen;
 	return res;
 }
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
 static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
 		      size_t *retlen, uint8_t *buf, uint8_t *oob)
 {
+	loff_t mask = mtd->writesize - 1;
 	struct mtd_oob_ops ops;
 	int res;
 
 	ops.mode = MTD_OOB_PLACE;
-	ops.ooboffs = offs;
+	ops.ooboffs = offs & mask;
 	ops.ooblen = mtd->oobsize;
 	ops.oobbuf = oob;
 	ops.datbuf = buf;
 	ops.len = len;
 
-	res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+	res = mtd->write_oob(mtd, offs & ~mask, &ops);
 	*retlen = ops.retlen;
 	return res;
 }

+ 1 - 0
drivers/net/gianfar.c

@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
 
 	dev_set_drvdata(&ofdev->dev, NULL);
 
+	unregister_netdev(priv->ndev);
 	iounmap(priv->regs);
 	free_netdev(priv->ndev);
 

+ 67 - 53
drivers/net/wireless/ipw2x00/ipw2200.c

@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
 	return 0;
 }
 
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
-				 u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+				 int nr, u32 dest_address, u32 len)
 {
-	u32 bytes_left = length;
-	u32 src_offset = 0;
-	u32 dest_offset = 0;
-	int status = 0;
+	int ret, i;
+	u32 size;
+
 	IPW_DEBUG_FW(">> \n");
-	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
-			  src_phys, dest_address, length);
-	while (bytes_left > CB_MAX_LENGTH) {
-		status = ipw_fw_dma_add_command_block(priv,
-						      src_phys + src_offset,
-						      dest_address +
-						      dest_offset,
-						      CB_MAX_LENGTH, 0, 0);
-		if (status) {
+	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+			  nr, dest_address, len);
+
+	for (i = 0; i < nr; i++) {
+		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+						   dest_address +
+						   i * CB_MAX_LENGTH, size,
+						   0, 0);
+		if (ret) {
 			IPW_DEBUG_FW_INFO(": Failed\n");
 			return -1;
 		} else
 			IPW_DEBUG_FW_INFO(": Added new cb\n");
-
-		src_offset += CB_MAX_LENGTH;
-		dest_offset += CB_MAX_LENGTH;
-		bytes_left -= CB_MAX_LENGTH;
-	}
-
-	/* add the buffer tail */
-	if (bytes_left > 0) {
-		status =
-		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
-						 dest_address + dest_offset,
-						 bytes_left, 0, 0);
-		if (status) {
-			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
-			return -1;
-		} else
-			IPW_DEBUG_FW_INFO
-			    (": Adding new cb - the buffer tail\n");
 	}
 
 	IPW_DEBUG_FW("<< \n");
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
 
 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
 {
-	int rc = -1;
+	int ret = -1;
 	int offset = 0;
 	struct fw_chunk *chunk;
-	dma_addr_t shared_phys;
-	u8 *shared_virt;
+	int total_nr = 0;
+	int i;
+	struct pci_pool *pool;
+	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
 
 	IPW_DEBUG_TRACE("<< : \n");
-	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
 
-	if (!shared_virt)
+	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+	if (!pool) {
+		IPW_ERROR("pci_pool_create failed\n");
 		return -ENOMEM;
-
-	memmove(shared_virt, data, len);
+	}
 
 	/* Start the Dma */
-	rc = ipw_fw_dma_enable(priv);
+	ret = ipw_fw_dma_enable(priv);
 
 	/* the DMA is already ready this would be a bug. */
 	BUG_ON(priv->sram_desc.last_cb_index > 0);
 
 	do {
+		u32 chunk_len;
+		u8 *start;
+		int size;
+		int nr = 0;
+
 		chunk = (struct fw_chunk *)(data + offset);
 		offset += sizeof(struct fw_chunk);
+		chunk_len = le32_to_cpu(chunk->length);
+		start = data + offset;
+
+		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+		for (i = 0; i < nr; i++) {
+			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+							 &phys[total_nr]);
+			if (!virts[total_nr]) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+				     CB_MAX_LENGTH);
+			memcpy(virts[total_nr], start, size);
+			start += size;
+			total_nr++;
+			/* We don't support fw chunk larger than 64*8K */
+			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+		}
+
 		/* build DMA packet and queue up for sending */
 		/* dma to chunk->address, the chunk->length bytes from data +
 		 * offeset*/
 		/* Dma loading */
-		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
-					   le32_to_cpu(chunk->address),
-					   le32_to_cpu(chunk->length));
-		if (rc) {
+		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+					    nr, le32_to_cpu(chunk->address),
+					    chunk_len);
+		if (ret) {
 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
 			goto out;
 		}
 
-		offset += le32_to_cpu(chunk->length);
+		offset += chunk_len;
 	} while (offset < len);
 
 	/* Run the DMA and wait for the answer */
-	rc = ipw_fw_dma_kick(priv);
-	if (rc) {
+	ret = ipw_fw_dma_kick(priv);
+	if (ret) {
 		IPW_ERROR("dmaKick Failed\n");
 		goto out;
 	}
 
-	rc = ipw_fw_dma_wait(priv);
-	if (rc) {
+	ret = ipw_fw_dma_wait(priv);
+	if (ret) {
 		IPW_ERROR("dmaWaitSync Failed\n");
 		goto out;
 	}
-      out:
-	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
-	return rc;
+ out:
+	for (i = 0; i < total_nr; i++)
		pci_pool_free(pool, virts[i], phys[i]);
+
+	pci_pool_destroy(pool);
+
+	return ret;
 }
 
 /* stop nic */
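
Note: the conversion above stops copying the whole firmware image into one large
coherent buffer and instead carves it into CB_MAX_LENGTH pieces allocated from a
pci_pool. A condensed, hedged sketch of that pool lifecycle (the function name and
parameters are illustrative; a real caller must handle every error path):

	static int example_load(struct pci_dev *pdev, const u8 *fw, size_t size)
	{
		struct pci_pool *pool;
		dma_addr_t phys;
		void *virt;

		pool = pci_pool_create("ipw2200", pdev, CB_MAX_LENGTH, 0, 0);
		if (!pool)
			return -ENOMEM;

		virt = pci_pool_alloc(pool, GFP_KERNEL, &phys);	/* one <= CB_MAX_LENGTH piece */
		if (!virt) {
			pci_pool_destroy(pool);
			return -ENOMEM;
		}
		/* memcpy() firmware bytes into virt, queue a command block for phys ... */

		pci_pool_free(pool, virt, phys);
		pci_pool_destroy(pool);
		return 0;
	}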

+ 23 - 0
drivers/pci/iov.c

@@ -597,6 +597,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
 		4 * (resno - PCI_IOV_RESOURCES);
 }
 
+/**
+ * pci_sriov_resource_alignment - get resource alignment for VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+ * Returns the alignment of the VF BAR found in the SR-IOV capability.
+ * This is not the same as the resource size which is defined as
+ * the VF BAR size multiplied by the number of VFs.  The alignment
+ * is just the VF BAR size.
+ */
+int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+{
+	struct resource tmp;
+	enum pci_bar_type type;
+	int reg = pci_iov_resource_bar(dev, resno, &type);
+
+	if (!reg)
+		return 0;
+
+	 __pci_read_base(dev, type, &tmp, reg);
+	return resource_alignment(&tmp);
+}
+
 /**
  * pci_restore_iov_state - restore the state of the IOV capability
  * @dev: the PCI device

+ 13 - 0
drivers/pci/pci.h

@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
 				enum pci_bar_type *type);
+extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
+static inline int pci_resource_alignment(struct pci_dev *dev,
+					 struct resource *res)
+{
+#ifdef CONFIG_PCI_IOV
+	int resno = res - dev->resource;
+
+	if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+		return pci_sriov_resource_alignment(dev, resno);
+#endif
+	return resource_alignment(res);
+}
+
 #endif /* DRIVERS_PCI_H */

+ 2 - 2
drivers/pci/setup-bus.c

@@ -25,7 +25,7 @@
 #include <linux/ioport.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
-
+#include "pci.h"
 
 static void pbus_assign_resources_sorted(const struct pci_bus *bus)
 {
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 				continue;
 			r_size = resource_size(r);
 			/* For bridges size != alignment */
-			align = resource_alignment(r);
+			align = pci_resource_alignment(dev, r);
 			order = __ffs(align) - 20;
 			if (order > 11) {
 				dev_warn(&dev->dev, "BAR %d bad alignment %llx: "

+ 4 - 4
drivers/pci/setup-res.c

@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
 
 	size = resource_size(res);
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-	align = resource_alignment(res);
+	align = pci_resource_alignment(dev, res);
 
 	/* First, try exact prefetching match.. */
 	ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 	struct pci_bus *bus;
 	int ret;
 
-	align = resource_alignment(res);
+	align = pci_resource_alignment(dev, res);
 	if (!align) {
 		dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
 			"alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
 		if (!(r->flags) || r->parent)
 			continue;
 
-		r_align = resource_alignment(r);
+		r_align = pci_resource_alignment(dev, r);
 		if (!r_align) {
 			dev_warn(&dev->dev, "BAR %d: bogus alignment "
 				"%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
 			struct resource_list *ln = list->next;
 
 			if (ln)
-				align = resource_alignment(ln->res);
+				align = pci_resource_alignment(ln->dev, ln->res);
 
 			if (r_align > align) {
 				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);

+ 4 - 13
fs/compat.c

@@ -1485,20 +1485,15 @@ int compat_do_execve(char * filename,
 	if (!bprm)
 		goto out_files;
 
-	retval = -ERESTARTNOINTR;
-	if (mutex_lock_interruptible(&current->cred_guard_mutex))
+	retval = prepare_bprm_creds(bprm);
+	if (retval)
 		goto out_free;
-	current->in_execve = 1;
-
-	retval = -ENOMEM;
-	bprm->cred = prepare_exec_creds();
-	if (!bprm->cred)
-		goto out_unlock;
 
 	retval = check_unsafe_exec(bprm);
 	if (retval < 0)
-		goto out_unlock;
+		goto out_free;
 	clear_in_exec = retval;
+	current->in_execve = 1;
 
 	file = open_exec(filename);
 	retval = PTR_ERR(file);
@@ -1547,7 +1542,6 @@ int compat_do_execve(char * filename,
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
-	mutex_unlock(&current->cred_guard_mutex);
 	acct_update_integrals(current);
 	free_bprm(bprm);
 	if (displaced)
@@ -1567,10 +1561,7 @@ out_file:
 out_unmark:
 	if (clear_in_exec)
 		current->fs->in_exec = 0;
-
-out_unlock:
 	current->in_execve = 0;
-	mutex_unlock(&current->cred_guard_mutex);
 
 out_free:
 	free_bprm(bprm);

+ 38 - 25
fs/exec.c

@@ -1015,6 +1015,35 @@ out:
 
 EXPORT_SYMBOL(flush_old_exec);
 
+/*
+ * Prepare credentials and lock ->cred_guard_mutex.
+ * install_exec_creds() commits the new creds and drops the lock.
+ * Or, if exec fails before, free_bprm() should release ->cred and
+ * and unlock.
+ */
+int prepare_bprm_creds(struct linux_binprm *bprm)
+{
+	if (mutex_lock_interruptible(&current->cred_guard_mutex))
+		return -ERESTARTNOINTR;
+
+	bprm->cred = prepare_exec_creds();
+	if (likely(bprm->cred))
+		return 0;
+
+	mutex_unlock(&current->cred_guard_mutex);
+	return -ENOMEM;
+}
+
+void free_bprm(struct linux_binprm *bprm)
+{
+	free_arg_pages(bprm);
+	if (bprm->cred) {
+		mutex_unlock(&current->cred_guard_mutex);
+		abort_creds(bprm->cred);
+	}
+	kfree(bprm);
+}
+
 /*
  * install the new credentials for this executable
  */
@@ -1024,12 +1053,13 @@ void install_exec_creds(struct linux_binprm *bprm)
 
 	commit_creds(bprm->cred);
 	bprm->cred = NULL;
-
-	/* cred_guard_mutex must be held at least to this point to prevent
+	/*
+	 * cred_guard_mutex must be held at least to this point to prevent
 	 * ptrace_attach() from altering our determination of the task's
-	 * credentials; any time after this it may be unlocked */
-
+	 * credentials; any time after this it may be unlocked.
+	 */
 	security_bprm_committed_creds(bprm);
+	mutex_unlock(&current->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
 
@@ -1246,14 +1276,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 
 EXPORT_SYMBOL(search_binary_handler);
 
-void free_bprm(struct linux_binprm *bprm)
-{
-	free_arg_pages(bprm);
-	if (bprm->cred)
-		abort_creds(bprm->cred);
-	kfree(bprm);
-}
-
 /*
  * sys_execve() executes a new program.
 */
@@ -1277,20 +1299,15 @@ int do_execve(char * filename,
 	if (!bprm)
 		goto out_files;
 
-	retval = -ERESTARTNOINTR;
-	if (mutex_lock_interruptible(&current->cred_guard_mutex))
+	retval = prepare_bprm_creds(bprm);
+	if (retval)
 		goto out_free;
-	current->in_execve = 1;
-
-	retval = -ENOMEM;
-	bprm->cred = prepare_exec_creds();
-	if (!bprm->cred)
-		goto out_unlock;
 
 	retval = check_unsafe_exec(bprm);
 	if (retval < 0)
-		goto out_unlock;
+		goto out_free;
 	clear_in_exec = retval;
+	current->in_execve = 1;
 
 	file = open_exec(filename);
 	retval = PTR_ERR(file);
@@ -1340,7 +1357,6 @@ int do_execve(char * filename,
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
-	mutex_unlock(&current->cred_guard_mutex);
 	acct_update_integrals(current);
 	free_bprm(bprm);
 	if (displaced)
@@ -1360,10 +1376,7 @@ out_file:
 out_unmark:
 	if (clear_in_exec)
 		current->fs->in_exec = 0;
-
-out_unlock:
 	current->in_execve = 0;
-	mutex_unlock(&current->cred_guard_mutex);
 
 out_free:
 	free_bprm(bprm);
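
Note: after this refactor cred_guard_mutex is taken in prepare_bprm_creds() and
dropped in exactly one place on each path, so do_execve() never unlocks it directly.
A hedged schematic of the pairing (the wrapper function and its flow are illustrative,
with the intermediate exec steps elided):

	static int example_execve_flow(struct linux_binprm *bprm)
	{
		int retval;

		retval = prepare_bprm_creds(bprm);	/* locks mutex, allocates bprm->cred */
		if (retval)
			return retval;			/* nothing to unlock yet */

		/* ... check_unsafe_exec(), open_exec(), search_binary_handler() ... */
		retval = 0;

		if (retval) {
			free_bprm(bprm);		/* aborts cred and unlocks the mutex */
			return retval;
		}

		/* on success the binfmt handler has called install_exec_creds(),
		 * which committed the creds and dropped the mutex */
		return 0;
	}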

+ 4 - 0
fs/ext2/namei.c

@@ -362,6 +362,10 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 	if (dir_de) {
 		if (old_dir != new_dir)
 			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
+		else {
+			kunmap(dir_page);
+			page_cache_release(dir_page);
+		}
 		inode_dec_link_count(old_dir);
 	}
 	return 0;

+ 10 - 0
fs/jffs2/wbuf.c

@@ -1268,10 +1268,20 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
 	if (!c->wbuf)
 		return -ENOMEM;
 
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+	if (!c->wbuf_verify) {
+		kfree(c->wbuf);
+		return -ENOMEM;
+	}
+#endif
 	return 0;
 }
 
 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
+#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
+	kfree(c->wbuf_verify);
+#endif
 	kfree(c->wbuf);
 }
 

+ 15 - 7
fs/namei.c

@@ -1542,28 +1542,31 @@ int may_open(struct path *path, int acc_mode, int flag)
 	 * An append-only file must be opened in append mode for writing.
 	 */
 	if (IS_APPEND(inode)) {
+		error = -EPERM;
 		if  ((flag & FMODE_WRITE) && !(flag & O_APPEND))
-			return -EPERM;
+			goto err_out;
 		if (flag & O_TRUNC)
-			return -EPERM;
+			goto err_out;
 	}
 
 	/* O_NOATIME can only be set by the owner or superuser */
 	if (flag & O_NOATIME)
-		if (!is_owner_or_cap(inode))
-			return -EPERM;
+		if (!is_owner_or_cap(inode)) {
+			error = -EPERM;
+			goto err_out;
+		}
 
 	/*
 	 * Ensure there are no outstanding leases on the file.
 	 */
 	error = break_lease(inode, flag);
 	if (error)
-		return error;
+		goto err_out;
 
 	if (flag & O_TRUNC) {
 		error = get_write_access(inode);
 		if (error)
-			return error;
+			goto err_out;
 
 		/*
 		 * Refuse to truncate files with mandatory locks held on them.
@@ -1581,12 +1584,17 @@ int may_open(struct path *path, int acc_mode, int flag)
 		}
 		put_write_access(inode);
 		if (error)
-			return error;
+			goto err_out;
 	} else
 		if (flag & FMODE_WRITE)
 			vfs_dq_init(inode);
 
 	return 0;
+err_out:
+	ima_counts_put(path, acc_mode ?
+		       acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
+		       ACC_MODE(flag) & (MAY_READ | MAY_WRITE));
+	return error;
 }
 
 /*

+ 1 - 1
fs/nilfs2/btnode.c

@@ -209,6 +209,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 		 * We cannot call radix_tree_preload for the kernels older
 		 * than 2.6.23, because it is not exported for modules.
 		 */
+retry:
 		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 		if (err)
 			goto failed_unlock;
@@ -219,7 +220,6 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
 				       (unsigned long long)oldkey,
 				       (unsigned long long)newkey);
 
-retry:
 		spin_lock_irq(&btnc->tree_lock);
 		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
 		spin_unlock_irq(&btnc->tree_lock);

+ 2 - 2
fs/ocfs2/aops.c

@@ -1747,8 +1747,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 	 * we know zeros will only be needed in the first and/or last cluster.
 	 */
 	if (clusters_to_alloc || extents_to_split ||
-	    wc->w_desc[0].c_needs_zero ||
-	    wc->w_desc[wc->w_clen - 1].c_needs_zero)
+	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
+			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
 		cluster_of_pages = 1;
 	else
 		cluster_of_pages = 0;

+ 11 - 0
fs/ocfs2/dcache.c

@@ -85,6 +85,17 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
 		goto bail;
 	}
 
+	/*
+	 * If the last lookup failed to create dentry lock, let us
+	 * redo it.
+	 */
+	if (!dentry->d_fsdata) {
+		mlog(0, "Inode %llu doesn't have dentry lock, "
+		     "returning false\n",
+		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
+		goto bail;
+	}
+
 	ret = 1;
 
 bail:

+ 1 - 1
fs/xfs/linux-2.6/xfs_ioctl32.c

@@ -619,7 +619,7 @@ xfs_file_compat_ioctl(
 	case XFS_IOC_GETVERSION_32:
 		cmd = _NATIVE_IOC(cmd, long);
 		return xfs_file_ioctl(filp, cmd, p);
-	case XFS_IOC_SWAPEXT: {
+	case XFS_IOC_SWAPEXT_32: {
 		struct xfs_swapext	  sxp;
 		struct compat_xfs_swapext __user *sxu = arg;
 

+ 1 - 0
include/crypto/algapi.h

@@ -137,6 +137,7 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
 			   struct crypto_async_request *request);
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 

+ 2 - 2
include/crypto/internal/skcipher.h

@@ -79,8 +79,8 @@ static inline int skcipher_enqueue_givcrypt(
 static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
 	struct crypto_queue *queue)
 {
-	return container_of(ablkcipher_dequeue_request(queue),
-			    struct skcipher_givcrypt_request, creq);
+	return __crypto_dequeue_request(
+		queue, offsetof(struct skcipher_givcrypt_request, creq.base));
 }
 
 static inline void *skcipher_givcrypt_reqctx(
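
Note: the old container_of() assumed the queued crypto_async_request sat at an
implicit position inside the givcrypt request; __crypto_dequeue_request() makes the
offset explicit instead. A small userspace sketch of the same offset arithmetic (the
struct names here are illustrative, not the crypto API's own types):

	#include <stddef.h>
	#include <stdio.h>

	struct base { int id; };
	struct inner { struct base base; };
	struct outer { int extra; struct inner creq; };

	int main(void)
	{
		struct outer o = { .extra = 42 };
		struct base *b = &o.creq.base;	/* what a queue would hand back */
		struct outer *back = (struct outer *)
			((char *)b - offsetof(struct outer, creq.base));

		printf("recovered extra=%d\n", back->extra);	/* prints 42 */
		return 0;
	}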

+ 1 - 0
include/linux/binfmts.h

@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 			   int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
 extern int set_binfmt(struct linux_binfmt *new);

+ 4 - 0
include/linux/device-mapper.h

@@ -91,6 +91,9 @@ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
 				      iterate_devices_callout_fn fn,
 				      void *data);
 
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+				struct queue_limits *limits);
+
 /*
  * Returns:
  *    0: The target can handle the next I/O immediately.
@@ -151,6 +154,7 @@ struct target_type {
 	dm_merge_fn merge;
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
+	dm_io_hints_fn io_hints;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
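
Note: a target can now adjust the table's queue limits through the new io_hints
callback. A hedged sketch of wiring it up (the target name and the particular limit
tweak are illustrative and not taken from this series):

	static void example_io_hints(struct dm_target *ti,
				     struct queue_limits *limits)
	{
		/* e.g. advertise a preferred minimum I/O size to upper layers */
		blk_limits_io_min(limits, 4096);
	}

	static struct target_type example_target = {
		.name     = "example",
		.version  = {1, 0, 0},
		.module   = THIS_MODULE,
		.io_hints = example_io_hints,
		/* .ctr/.dtr/.map and the rest omitted */
	};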

+ 12 - 1
include/linux/dm-log-userspace.h

@@ -371,7 +371,18 @@
 	(DM_ULOG_REQUEST_MASK & (request_type))
 
 struct dm_ulog_request {
-	char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+	/*
+	 * The local unique identifier (luid) and the universally unique
+	 * identifier (uuid) are used to tie a request to a specific
+	 * mirror log.  A single machine log could probably make due with
+	 * just the 'luid', but a cluster-aware log must use the 'uuid' and
+	 * the 'luid'.  The uuid is what is required for node to node
+	 * communication concerning a particular log, but the 'luid' helps
+	 * differentiate between logs that are being swapped and have the
+	 * same 'uuid'.  (Think "live" and "inactive" device-mapper tables.)
+	 */
+	uint64_t luid;
+	char uuid[DM_UUID_LEN];
 	char padding[7];        /* Padding because DM_UUID_LEN = 129 */
 
 	int32_t error;          /* Used to report back processing errors */

+ 15 - 0
include/linux/workqueue.h

@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
+/*
+ * Like above, but uses del_timer() instead of del_timer_sync(). This means,
+ * if it returns 0 the timer function may be running and the queueing is in
+ * progress.
+ */
+static inline int __cancel_delayed_work(struct delayed_work *work)
+{
+	int ret;
+
+	ret = del_timer(&work->timer);
+	if (ret)
+		work_clear_pending(&work->work);
+	return ret;
+}
+
 extern int cancel_delayed_work_sync(struct delayed_work *work);
 
 /* Obsolete. use cancel_delayed_work_sync() */
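
Note: unlike cancel_delayed_work(), the new __cancel_delayed_work() uses del_timer()
and therefore never sleeps, so it may be called from atomic context; the price is
that a 0 return no longer guarantees the work is not about to run. A usage sketch
(the lock and the delayed_work, assumed to be initialised elsewhere with
INIT_DELAYED_WORK(), are illustrative):

	static struct delayed_work example_dwork;
	static DEFINE_SPINLOCK(example_lock);

	static void example_atomic_path(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		if (!__cancel_delayed_work(&example_dwork)) {
			/* timer may already be firing; requeue or tolerate it */
		}
		spin_unlock_irqrestore(&example_lock, flags);
	}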

+ 2 - 2
include/net/pkt_sched.h

@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
-	struct tasklet_hrtimer	timer;
-	struct Qdisc		*qdisc;
+	struct hrtimer	timer;
+	struct Qdisc	*qdisc;
 };
 
 extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

+ 2 - 1
kernel/perf_counter.c

@@ -50,7 +50,7 @@ static atomic_t nr_task_counters __read_mostly;
  *  1 - disallow cpu counters to unpriv
  *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly;
+int sysctl_perf_counter_paranoid __read_mostly = 1;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -4066,6 +4066,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	hwc->sample_period = attr->sample_period;
 	if (attr->freq && attr->sample_freq)
 		hwc->sample_period = 1;
+	hwc->last_period = hwc->sample_period;
 
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 

+ 1 - 2
mm/nommu.c

@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	}
 
 	vma->vm_region = region;
+	add_nommu_region(region);
 
 	/* set up the mapping */
 	if (file && vma->vm_flags & VM_SHARED)
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (ret < 0)
 		goto error_put_region;
 
-	add_nommu_region(region);
-
 	/* okay... we have a mapping; now we have to register it */
 	result = vma->vm_start;
 

+ 4 - 2
mm/page_alloc.c

@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * agressive about taking ownership of free pages
 			 */
 			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE) {
+					start_migratetype == MIGRATE_RECLAIMABLE ||
+					page_group_by_mobility_disabled) {
 				unsigned long pages;
 				pages = move_freepages_block(zone, page,
 								start_migratetype);
 
 				/* Claim the whole block if over half of it is free */
-				if (pages >= (1 << (pageblock_order-1)))
+				if (pages >= (1 << (pageblock_order-1)) ||
+						page_group_by_mobility_disabled)
 					set_pageblock_migratetype(page,
 								start_migratetype);
 

+ 14 - 1
mm/percpu.c

@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 				     int page_idx)
 {
-	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+	/*
+	 * Any possible cpu id can be used here, so there's no need to
+	 * worry about preemption or cpu hotplug.
+	 */
+	return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
+				 page_idx) != NULL;
 }
 
 /* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 		return pcpu_first_chunk;
 	}
 
+	/*
+	 * The address is relative to unit0 which might be unused and
+	 * thus unmapped.  Offset the address to the unit space of the
+	 * current processor before looking it up in the vmalloc
+	 * space.  Note that any possible cpu id can be used here, so
+	 * there's no need to worry about preemption or cpu hotplug.
+	 */
+	addr += raw_smp_processor_id() * pcpu_unit_size;
 	return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 

+ 2 - 2
mm/slub.c

@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	if (s->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 				"still has objects.\n", s->name, __func__);
 			dump_stack();
 		}
+		if (s->flags & SLAB_DESTROY_BY_RCU)
+			rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
 		up_write(&slub_lock);
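
Note: for SLAB_DESTROY_BY_RCU caches the object memory must not be returned to the
system before an RCU grace period has elapsed, so the rcu_barrier() now runs only
when the cache is really being torn down (refcount reached zero). A reminder sketch
of the cache lifecycle involved (all names are illustrative):

	struct example_obj { int data; };
	static struct kmem_cache *example_cache;

	static int __init example_init(void)
	{
		example_cache = kmem_cache_create("example", sizeof(struct example_obj),
						  0, SLAB_DESTROY_BY_RCU, NULL);
		return example_cache ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		/* objects freed earlier may still be read under rcu_read_lock();
		 * kmem_cache_destroy() waits for them before tearing down */
		kmem_cache_destroy(example_cache);
	}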

+ 1 - 1
net/core/sock.c

@@ -1025,6 +1025,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
 		sock_net_set(sk, get_net(net));
+		atomic_set(&sk->sk_wmem_alloc, 1);
 	}
 
 	return sk;
@@ -1872,7 +1873,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	 */
 	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
-	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);

+ 7 - 5
net/sched/sch_api.c

@@ -458,7 +458,7 @@ EXPORT_SYMBOL(qdisc_warn_nonwc);
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
-						 timer.timer);
+						 timer);
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	__netif_schedule(qdisc_root(wd->qdisc));
@@ -468,8 +468,8 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 {
-	tasklet_hrtimer_init(&wd->timer, qdisc_watchdog,
-			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	wd->timer.function = qdisc_watchdog;
 	wd->qdisc = qdisc;
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
@@ -485,13 +485,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
-	tasklet_hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule);
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
-	tasklet_hrtimer_cancel(&wd->timer);
+	hrtimer_cancel(&wd->timer);
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
@@ -1456,6 +1456,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
+	tcm->tcm__pad1 = 0;
+	tcm->tcm__pad2 = 0;
 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
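
Note: the watchdog goes back to a plain hrtimer whose callback fires in hard-irq
context, so the raw hrtimer API replaces the tasklet_hrtimer wrappers. A minimal
sketch of that pattern (the structure, callback, and helper names are illustrative):

	struct example_wd {
		struct hrtimer timer;
	};

	static enum hrtimer_restart example_fire(struct hrtimer *t)
	{
		struct example_wd *wd = container_of(t, struct example_wd, timer);

		/* ... wake up whatever owns wd ... */
		return HRTIMER_NORESTART;
	}

	static void example_arm(struct example_wd *wd, ktime_t when)
	{
		hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		wd->timer.function = example_fire;
		hrtimer_start(&wd->timer, when, HRTIMER_MODE_ABS);
	}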

+ 11 - 14
net/sched/sch_cbq.c

@@ -163,7 +163,7 @@ struct cbq_sched_data
 	psched_time_t		now_rt;		/* Cached real time */
 	unsigned		pmask;
 
-	struct tasklet_hrtimer	delay_timer;
+	struct hrtimer		delay_timer;
 	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
 						   started when CBQ has
 						   backlog, but cannot
@@ -503,8 +503,6 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 		cl->undertime = q->now + delay;
 
 		if (delay > 0) {
-			struct hrtimer *ht;
-
 			sched += delay + cl->penalty;
 			cl->penalized = sched;
 			cl->cpriority = TC_CBQ_MAXPRIO;
@@ -512,12 +510,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 
 			expires = ktime_set(0, 0);
 			expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
-			ht = &q->delay_timer.timer;
-			if (hrtimer_try_to_cancel(ht) &&
-			    ktime_to_ns(ktime_sub(hrtimer_get_expires(ht),
-						  expires)) > 0)
-				hrtimer_set_expires(ht, expires);
-			hrtimer_restart(ht);
+			if (hrtimer_try_to_cancel(&q->delay_timer) &&
+			    ktime_to_ns(ktime_sub(
+					hrtimer_get_expires(&q->delay_timer),
+					expires)) > 0)
+				hrtimer_set_expires(&q->delay_timer, expires);
+			hrtimer_restart(&q->delay_timer);
 			cl->delayed = 1;
 			cl->xstats.overactions++;
 			return;
@@ -593,7 +591,7 @@ static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
 static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
 	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
-						delay_timer.timer);
+						delay_timer);
 	struct Qdisc *sch = q->watchdog.qdisc;
 	psched_time_t now;
 	psched_tdiff_t delay = 0;
@@ -623,7 +621,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 
 		time = ktime_set(0, 0);
 		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
-		tasklet_hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
+		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
@@ -1216,7 +1214,7 @@ cbq_reset(struct Qdisc* sch)
 	q->tx_class = NULL;
 	q->tx_borrowed = NULL;
 	qdisc_watchdog_cancel(&q->watchdog);
-	tasklet_hrtimer_cancel(&q->delay_timer);
+	hrtimer_cancel(&q->delay_timer);
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	q->now = psched_get_time();
 	q->now_rt = q->now;
@@ -1399,8 +1397,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.minidle = -0x7FFFFFFF;
 
 	qdisc_watchdog_init(&q->watchdog, sch);
-	tasklet_hrtimer_init(&q->delay_timer, cbq_undelay,
-			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	q->delay_timer.function = cbq_undelay;
 	q->toplevel = TC_CBQ_MAXLEVEL;
 	q->now = psched_get_time();

+ 5 - 1
security/integrity/ima/ima_main.c

@@ -249,7 +249,11 @@ void ima_counts_put(struct path *path, int mask)
 	struct inode *inode = path->dentry->d_inode;
 	struct ima_iint_cache *iint;
 
-	if (!ima_initialized || !S_ISREG(inode->i_mode))
+	/* The inode may already have been freed, freeing the iint
+	 * with it. Verify the inode is not NULL before dereferencing
+	 * it.
+	 */
+	if (!ima_initialized || !inode || !S_ISREG(inode->i_mode))
 		return;
 	iint = ima_iint_find_insert_get(inode);
 	if (!iint)

+ 3 - 0
sound/pci/oxygen/oxygen_lib.c

@@ -260,6 +260,9 @@ oxygen_search_pci_id(struct oxygen *chip, const struct pci_device_id ids[])
 	 * chip didn't if the first EEPROM word was overwritten.
 	 */
 	subdevice = oxygen_read_eeprom(chip, 2);
+	/* use default ID if EEPROM is missing */
+	if (subdevice == 0xffff)
+		subdevice = 0x8788;
 	/*
 	 * We use only the subsystem device ID for searching because it is
 	 * unique even without the subsystem vendor ID, which may have been

+ 2 - 0
sound/pci/oxygen/oxygen_pcm.c

@@ -469,9 +469,11 @@ static int oxygen_multich_hw_params(struct snd_pcm_substream *substream,
 	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
 	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
 			      oxygen_rate(hw_params) |
 			      oxygen_rate(hw_params) |
 			      chip->model.dac_i2s_format |
 			      chip->model.dac_i2s_format |
+			      oxygen_i2s_mclk(hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
+			      OXYGEN_I2S_MCLK_MASK |
 			      OXYGEN_I2S_BITS_MASK);
 			      OXYGEN_I2S_BITS_MASK);
 	oxygen_update_dac_routing(chip);
 	oxygen_update_dac_routing(chip);
 	oxygen_update_spdif_source(chip);
 	oxygen_update_spdif_source(chip);