
Merge branch 'imx/board' into next/boards

* imx/board: (4 commits)
  Enable 32 bit flash support for iMX21ADS board
  ARM: mx31pdk: Add MC13783 RTC support
  iomux-mx25: configuration to support CSPI3 on CSI pins
  MX1:apf9328: Add i2c support

Updated to v3.2-rc6, conflicts:
	arch/arm/kernel/setup.c
Arnd Bergmann, 13 years ago
commit 1fc3eb8110
100 files changed, 1147 insertions(+), 853 deletions(-)
  1. Documentation/ABI/testing/sysfs-bus-rbd  (+0 -7)
  2. Makefile  (+1 -1)
  3. arch/arm/Kconfig  (+2 -1)
  4. arch/arm/configs/imx_v4_v5_defconfig  (+0 -1)
  5. arch/arm/include/asm/unwind.h  (+4 -12)
  6. arch/arm/kernel/perf_event.c  (+3 -0)
  7. arch/arm/kernel/setup.c  (+6 -8)
  8. arch/arm/kernel/unwind.c  (+84 -45)
  9. arch/arm/mach-exynos/mct.c  (+10 -3)
 10. arch/arm/mach-imx/Kconfig  (+1 -0)
 11. arch/arm/mach-imx/mach-apf9328.c  (+10 -0)
 12. arch/arm/mach-imx/mach-mx31_3ds.c  (+1 -1)
 13. arch/arm/mach-mx5/board-mx51_babbage.c  (+1 -1)
 14. arch/arm/mach-mx5/board-mx53_evk.c  (+1 -1)
 15. arch/arm/mach-mx5/board-mx53_loco.c  (+1 -1)
 16. arch/arm/mach-mx5/board-mx53_smd.c  (+1 -1)
 17. arch/arm/mach-omap2/board-rx51-peripherals.c  (+1 -1)
 18. arch/arm/mach-omap2/mcbsp.c  (+3 -3)
 19. arch/arm/mach-s5pv210/mach-smdkv210.c  (+1 -0)
 20. arch/arm/plat-mxc/cpufreq.c  (+1 -0)
 21. arch/arm/plat-mxc/include/mach/iomux-mx25.h  (+4 -0)
 22. arch/arm/plat-mxc/pwm.c  (+6 -1)
 23. arch/arm/plat-samsung/dev-backlight.c  (+0 -1)
 24. arch/sparc/kernel/ds.c  (+2 -4)
 25. arch/sparc/kernel/prom_common.c  (+1 -3)
 26. arch/sparc/mm/btfixup.c  (+1 -2)
 27. arch/x86/include/asm/e820.h  (+0 -8)
 28. arch/x86/include/asm/efi.h  (+5 -0)
 29. arch/x86/kernel/e820.c  (+1 -2)
 30. arch/x86/kernel/setup.c  (+1 -20)
 31. arch/x86/platform/efi/efi.c  (+11 -18)
 32. arch/x86/platform/efi/efi_32.c  (+2 -46)
 33. arch/x86/platform/efi/efi_64.c  (+17 -0)
 34. arch/x86/xen/setup.c  (+15 -3)
 35. block/blk-core.c  (+11 -12)
 36. block/cfq-iosched.c  (+14 -2)
 37. drivers/block/cciss.c  (+4 -2)
 38. drivers/block/loop.c  (+2 -2)
 39. drivers/block/rbd.c  (+10 -91)
 40. drivers/block/swim3.c  (+215 -147)
 41. drivers/bluetooth/Kconfig  (+3 -3)
 42. drivers/bluetooth/btmrvl_sdio.c  (+13 -2)
 43. drivers/bluetooth/btusb.c  (+1 -2)
 44. drivers/firmware/iscsi_ibft.c  (+40 -2)
 45. drivers/firmware/iscsi_ibft_find.c  (+2 -24)
 46. drivers/gpio/gpio-da9052.c  (+8 -13)
 47. drivers/gpio/gpio-ml-ioh.c  (+31 -1)
 48. drivers/gpio/gpio-mpc8xxx.c  (+13 -5)
 49. drivers/gpio/gpio-pl061.c  (+0 -4)
 50. drivers/gpu/drm/i915/i915_debugfs.c  (+1 -0)
 51. drivers/gpu/drm/i915/i915_dma.c  (+10 -0)
 52. drivers/gpu/drm/i915/i915_drv.c  (+33 -10)
 53. drivers/gpu/drm/i915/i915_drv.h  (+14 -4)
 54. drivers/gpu/drm/i915/i915_gem.c  (+1 -6)
 55. drivers/gpu/drm/i915/i915_gem_execbuffer.c  (+18 -1)
 56. drivers/gpu/drm/i915/i915_reg.h  (+26 -4)
 57. drivers/gpu/drm/i915/intel_display.c  (+79 -10)
 58. drivers/gpu/drm/i915/intel_dp.c  (+133 -40)
 59. drivers/gpu/drm/i915/intel_drv.h  (+1 -0)
 60. drivers/gpu/drm/i915/intel_lvds.c  (+8 -0)
 61. drivers/gpu/drm/i915/intel_panel.c  (+5 -11)
 62. drivers/gpu/drm/i915/intel_sdvo.c  (+26 -10)
 63. drivers/hwmon/jz4740-hwmon.c  (+2 -2)
 64. drivers/iommu/intel-iommu.c  (+5 -0)
 65. drivers/mmc/card/block.c  (+8 -0)
 66. drivers/mmc/core/core.c  (+64 -34)
 67. drivers/mmc/core/mmc.c  (+8 -4)
 68. drivers/mmc/host/mxcmmc.c  (+1 -0)
 69. drivers/mmc/host/omap_hsmmc.c  (+5 -2)
 70. drivers/mmc/host/sdhci-cns3xxx.c  (+1 -0)
 71. drivers/mmc/host/sdhci-s3c.c  (+0 -2)
 72. drivers/mmc/host/sh_mmcif.c  (+1 -1)
 73. drivers/mmc/host/tmio_mmc_pio.c  (+1 -1)
 74. drivers/net/ethernet/freescale/fec.c  (+7 -4)
 75. drivers/net/ethernet/freescale/fsl_pq_mdio.c  (+8 -45)
 76. drivers/net/ppp/pptp.c  (+1 -3)
 77. drivers/net/wireless/ath/ath9k/main.c  (+1 -1)
 78. drivers/net/wireless/rtlwifi/rtl8192ce/phy.c  (+1 -1)
 79. drivers/net/wireless/rtlwifi/rtl8192cu/phy.c  (+1 -1)
 80. drivers/net/wireless/rtlwifi/rtl8192de/phy.c  (+1 -1)
 81. drivers/net/wireless/rtlwifi/rtl8192se/phy.c  (+1 -1)
 82. drivers/sbus/char/bbc_i2c.c  (+5 -22)
 83. drivers/sbus/char/display7seg.c  (+1 -12)
 84. drivers/sbus/char/envctrl.c  (+1 -11)
 85. drivers/sbus/char/flash.c  (+1 -11)
 86. drivers/sbus/char/uctrl.c  (+1 -11)
 87. drivers/ssb/driver_pcicore.c  (+6 -2)
 88. drivers/staging/rtl8712/usb_intf.c  (+1 -0)
 89. drivers/staging/tidspbridge/core/dsp-clock.c  (+12 -3)
 90. drivers/staging/tidspbridge/rmgr/drv_interface.c  (+0 -4)
 91. drivers/usb/class/cdc-acm.c  (+10 -0)
 92. drivers/usb/gadget/f_mass_storage.c  (+1 -0)
 93. drivers/usb/renesas_usbhs/mod.c  (+1 -1)
 94. drivers/usb/renesas_usbhs/mod_host.c  (+1 -0)
 95. drivers/usb/serial/option.c  (+6 -1)
 96. drivers/xen/swiotlb-xen.c  (+2 -2)
 97. fs/btrfs/async-thread.c  (+55 -62)
 98. fs/btrfs/async-thread.h  (+2 -2)
 99. fs/btrfs/ctree.h  (+2 -1)
100. fs/btrfs/delayed-inode.c  (+2 -2)

+ 0 - 7
Documentation/ABI/testing/sysfs-bus-rbd

@@ -57,13 +57,6 @@ create_snap
 
 
 	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
 
-rollback_snap
-
-	Rolls back data to the specified snapshot. This goes over the entire
-	list of rados blocks and sends a rollback command to each.
-
-	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 snap_*
 
 
 	A directory per each snapshot
 	A directory per each snapshot

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 VERSION = 3
 PATCHLEVEL = 2
 PATCHLEVEL = 2
 SUBLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 NAME = Saber-toothed Squirrel
 
 
 # *DOCUMENTATION*
 # *DOCUMENTATION*

+ 2 - 1
arch/arm/Kconfig

@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
 	  be avoided when possible.
 	  be avoided when possible.
 
 
 config PHYS_OFFSET
 config PHYS_OFFSET
-	hex "Physical address of main memory"
+	hex "Physical address of main memory" if MMU
 	depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
 	depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+	default DRAM_BASE if !MMU
 	help
 	help
 	  Please provide the physical address corresponding to the
 	  Please provide the physical address corresponding to the
 	  location of main memory in your system.
 	  location of main memory in your system.

+ 0 - 1
arch/arm/configs/imx_v4_v5_defconfig

@@ -67,7 +67,6 @@ CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
 CONFIG_MTD_CFI_GEOMETRY=y
 CONFIG_MTD_CFI_GEOMETRY=y
 # CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
 # CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
 # CONFIG_MTD_CFI_I2 is not set
 # CONFIG_MTD_CFI_I2 is not set
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_PHYSMAP=y

+ 4 - 12
arch/arm/include/asm/unwind.h

@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 };
 
 
 struct unwind_idx {
 struct unwind_idx {
-	unsigned long addr;
+	unsigned long addr_offset;
 	unsigned long insn;
 	unsigned long insn;
 };
 };
 
 
 struct unwind_table {
 struct unwind_table {
 	struct list_head list;
 	struct list_head list;
-	struct unwind_idx *start;
-	struct unwind_idx *stop;
+	const struct unwind_idx *start;
+	const struct unwind_idx *origin;
+	const struct unwind_idx *stop;
 	unsigned long begin_addr;
 	unsigned long begin_addr;
 	unsigned long end_addr;
 	unsigned long end_addr;
 };
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-	return 0;
-}
-#endif
-
 #endif	/* !__ASSEMBLY__ */
 #endif	/* !__ASSEMBLY__ */
 
 
 #ifdef CONFIG_ARM_UNWIND
 #ifdef CONFIG_ARM_UNWIND

+ 3 - 0
arch/arm/kernel/perf_event.c

@@ -639,6 +639,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
 {
+	if (!cpu_pmu)
+		return -ENODEV;
+
 	cpu_pmu->plat_device = pdev;
 	cpu_pmu->plat_device = pdev;
 	return 0;
 	return 0;
 }
 }

+ 6 - 8
arch/arm/kernel/setup.c

@@ -902,8 +902,6 @@ void __init setup_arch(char **cmdline_p)
 {
 {
 	struct machine_desc *mdesc;
 	struct machine_desc *mdesc;
 
 
-	unwind_init();
-
 	setup_processor();
 	setup_processor();
 	mdesc = setup_machine_fdt(__atags_pointer);
 	mdesc = setup_machine_fdt(__atags_pointer);
 	if (!mdesc)
 	if (!mdesc)
@@ -911,6 +909,12 @@ void __init setup_arch(char **cmdline_p)
 	machine_desc = mdesc;
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 	machine_name = mdesc->name;
 
 
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
 	if (mdesc->restart_mode)
 	if (mdesc->restart_mode)
 		reboot_setup(&mdesc->restart_mode);
 		reboot_setup(&mdesc->restart_mode);
 
 
@@ -945,12 +949,6 @@ void __init setup_arch(char **cmdline_p)
 
 
 	tcm_init();
 	tcm_init();
 
 
-#ifdef CONFIG_ZONE_DMA
-	if (mdesc->dma_zone_size) {
-		extern unsigned long arm_dma_zone_size;
-		arm_dma_zone_size = mdesc->dma_zone_size;
-	}
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 	handle_arch_irq = mdesc->handle_irq;
 #endif
 #endif

+ 84 - 45
arch/arm/kernel/unwind.c

@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 
 struct unwind_ctrl_block {
 struct unwind_ctrl_block {
 	unsigned long vrs[16];		/* virtual register set */
 	unsigned long vrs[16];		/* virtual register set */
-	unsigned long *insn;		/* pointer to the current instructions word */
+	const unsigned long *insn;	/* pointer to the current instructions word */
 	int entries;			/* number of entries left to interpret */
 	int entries;			/* number of entries left to interpret */
 	int byte;			/* current byte number in the instructions word */
 	int byte;			/* current byte number in the instructions word */
 };
 };
@@ -83,8 +83,9 @@ enum regs {
 	PC = 15
 	PC = 15
 };
 };
 
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 
 static DEFINE_SPINLOCK(unwind_lock);
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 })
 
 
 /*
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
  */
-static struct unwind_idx *search_index(unsigned long addr,
-				       struct unwind_idx *first,
-				       struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+				       const struct unwind_idx *start,
+				       const struct unwind_idx *origin,
+				       const struct unwind_idx *stop)
 {
 {
-	pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+	unsigned long addr_prel31;
+
+	pr_debug("%s(%08lx, %p, %p, %p)\n",
+			__func__, addr, start, origin, stop);
+
+	/*
+	 * only search in the section with the matching sign. This way the
+	 * prel31 numbers can be compared as unsigned longs.
+	 */
+	if (addr < (unsigned long)start)
+		/* negative offsets: [start; origin) */
+		stop = origin;
+	else
+		/* positive offsets: [origin; stop) */
+		start = origin;
+
+	/* prel31 for address relative to start */
+	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
 
-	if (addr < first->addr) {
+	while (start < stop - 1) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+		/*
+		 * As addr_prel31 is relative to start an offset is needed to
+		 * make it relative to mid.
+		 */
+		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+				mid->addr_offset)
+			stop = mid;
+		else {
+			/* keep addr_prel31 relative to start */
+			addr_prel31 -= ((unsigned long)mid -
+					(unsigned long)start);
+			start = mid;
+		}
+	}
+
+	if (likely(start->addr_offset <= addr_prel31))
+		return start;
+	else {
 		pr_warning("unwind: Unknown symbol address %08lx\n", addr);
 		pr_warning("unwind: Unknown symbol address %08lx\n", addr);
 		return NULL;
 		return NULL;
-	} else if (addr >= last->addr)
-		return last;
+	}
+}
 
 
-	while (first < last - 1) {
-		struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+		const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+	pr_debug("%s(%p, %p)\n", __func__, start, stop);
+	while (start < stop) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
 
-		if (addr < mid->addr)
-			last = mid;
+		if (mid->addr_offset >= 0x40000000)
+			/* negative offset */
+			start = mid + 1;
 		else
 		else
-			first = mid;
+			/* positive offset */
+			stop = mid;
 	}
 	}
-
-	return first;
+	pr_debug("%s -> %p\n", __func__, stop);
+	return stop;
 }
 }
 
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
 {
-	struct unwind_idx *idx = NULL;
+	const struct unwind_idx *idx = NULL;
 	unsigned long flags;
 	unsigned long flags;
 
 
 	pr_debug("%s(%08lx)\n", __func__, addr);
 	pr_debug("%s(%08lx)\n", __func__, addr);
 
 
-	if (core_kernel_text(addr))
+	if (core_kernel_text(addr)) {
+		if (unlikely(!__origin_unwind_idx))
+			__origin_unwind_idx =
+				unwind_find_origin(__start_unwind_idx,
+						__stop_unwind_idx);
+
 		/* main unwind table */
 		/* main unwind table */
 		idx = search_index(addr, __start_unwind_idx,
 		idx = search_index(addr, __start_unwind_idx,
-				   __stop_unwind_idx - 1);
-	else {
+				   __origin_unwind_idx,
+				   __stop_unwind_idx);
+	} else {
 		/* module unwind tables */
 		/* module unwind tables */
 		struct unwind_table *table;
 		struct unwind_table *table;
 
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
 			if (addr >= table->begin_addr &&
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
 			    addr < table->end_addr) {
 				idx = search_index(addr, table->start,
 				idx = search_index(addr, table->start,
-						   table->stop - 1);
+						   table->origin,
+						   table->stop);
 				/* Move-to-front to exploit common traces */
 				/* Move-to-front to exploit common traces */
 				list_move(&table->list, &unwind_tables);
 				list_move(&table->list, &unwind_tables);
 				break;
 				break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 int unwind_frame(struct stackframe *frame)
 {
 {
 	unsigned long high, low;
 	unsigned long high, low;
-	struct unwind_idx *idx;
+	const struct unwind_idx *idx;
 	struct unwind_ctrl_block ctrl;
 	struct unwind_ctrl_block ctrl;
 
 
 	/* only go to a higher address on the stack */
 	/* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
 				      unsigned long text_size)
 				      unsigned long text_size)
 {
 {
 	unsigned long flags;
 	unsigned long flags;
-	struct unwind_idx *idx;
 	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
 
 	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
 	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
 	if (!tab)
 	if (!tab)
 		return tab;
 		return tab;
 
 
-	tab->start = (struct unwind_idx *)start;
-	tab->stop = (struct unwind_idx *)(start + size);
+	tab->start = (const struct unwind_idx *)start;
+	tab->stop = (const struct unwind_idx *)(start + size);
+	tab->origin = unwind_find_origin(tab->start, tab->stop);
 	tab->begin_addr = text_addr;
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 	tab->end_addr = text_addr + text_size;
 
 
-	/* Convert the symbol addresses to absolute values */
-	for (idx = tab->start; idx < tab->stop; idx++)
-		idx->addr = prel31_to_addr(&idx->addr);
-
 	spin_lock_irqsave(&unwind_lock, flags);
 	spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
 	list_add_tail(&tab->list, &unwind_tables);
 	spin_unlock_irqrestore(&unwind_lock, flags);
 	spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
 
 	kfree(tab);
 	kfree(tab);
 }
 }
-
-int __init unwind_init(void)
-{
-	struct unwind_idx *idx;
-
-	/* Convert the symbol addresses to absolute values */
-	for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-		idx->addr = prel31_to_addr(&idx->addr);
-
-	pr_debug("unwind: ARM stack unwinding initialised\n");
-
-	return 0;
-}

+ 10 - 3
arch/arm/mach-exynos/mct.c

@@ -44,8 +44,6 @@ struct mct_clock_event_device {
 	char name[10];
 	char name[10];
 };
 };
 
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
 {
 	void __iomem *stat_addr;
 	void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 }
 
 
 #ifdef CONFIG_LOCAL_TIMERS
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 
 void local_timer_stop(struct clock_event_device *evt)
 void local_timer_stop(struct clock_event_device *evt)
 {
 {
+	unsigned int cpu = smp_processor_id();
 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 	if (mct_int_type == MCT_INT_SPI)
 	if (mct_int_type == MCT_INT_SPI)
-		disable_irq(evt->irq);
+		if (cpu == 0)
+			remove_irq(evt->irq, &mct_tick0_event_irq);
+		else
+			remove_irq(evt->irq, &mct_tick1_event_irq);
 	else
 	else
 		disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 		disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
 
 	clk_rate = clk_get_rate(mct_clk);
 	clk_rate = clk_get_rate(mct_clk);
 
 
+#ifdef CONFIG_LOCAL_TIMERS
 	if (mct_int_type == MCT_INT_PPI) {
 	if (mct_int_type == MCT_INT_PPI) {
 		int err;
 		int err;
 
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     IRQ_MCT_LOCALTIMER, err);
 		     IRQ_MCT_LOCALTIMER, err);
 	}
 	}
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 }
 
 
 static void __init exynos4_timer_init(void)
 static void __init exynos4_timer_init(void)

+ 1 - 0
arch/arm/mach-imx/Kconfig

@@ -98,6 +98,7 @@ config MACH_SCB9328
 config MACH_APF9328
 config MACH_APF9328
 	bool "APF9328"
 	bool "APF9328"
 	select SOC_IMX1
 	select SOC_IMX1
+	select IMX_HAVE_PLATFORM_IMX_I2C
 	select IMX_HAVE_PLATFORM_IMX_UART
 	select IMX_HAVE_PLATFORM_IMX_UART
 	help
 	help
 	  Say Yes here if you are using the Armadeus APF9328 development board
 	  Say Yes here if you are using the Armadeus APF9328 development board

+ 10 - 0
arch/arm/mach-imx/mach-apf9328.c

@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/physmap.h>
 #include <linux/dm9000.h>
 #include <linux/dm9000.h>
+#include <linux/i2c.h>
 
 
 #include <asm/mach-types.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/arch.h>
@@ -41,6 +42,9 @@ static const int apf9328_pins[] __initconst = {
 	PB29_PF_UART2_RTS,
 	PB29_PF_UART2_RTS,
 	PB30_PF_UART2_TXD,
 	PB30_PF_UART2_TXD,
 	PB31_PF_UART2_RXD,
 	PB31_PF_UART2_RXD,
+	/* I2C */
+	PA15_PF_I2C_SDA,
+	PA16_PF_I2C_SCL,
 };
 };
 
 
 /*
 /*
@@ -103,6 +107,10 @@ static const struct imxuart_platform_data uart1_pdata __initconst = {
 	.flags = IMXUART_HAVE_RTSCTS,
 	.flags = IMXUART_HAVE_RTSCTS,
 };
 };
 
 
+static const struct imxi2c_platform_data apf9328_i2c_data __initconst = {
+	.bitrate = 100000,
+};
+
 static struct platform_device *devices[] __initdata = {
 static struct platform_device *devices[] __initdata = {
 	&apf9328_flash_device,
 	&apf9328_flash_device,
 	&dm9000x_device,
 	&dm9000x_device,
@@ -119,6 +127,8 @@ static void __init apf9328_init(void)
 	imx1_add_imx_uart0(NULL);
 	imx1_add_imx_uart0(NULL);
 	imx1_add_imx_uart1(&uart1_pdata);
 	imx1_add_imx_uart1(&uart1_pdata);
 
 
+	imx1_add_imx_i2c(&apf9328_i2c_data);
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 }
 
 

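As a follow-up, a board could attach client devices to this new bus before it is registered. The snippet below is a hypothetical illustration, not part of this merge: the 24c02 EEPROM and its 0x50 address are invented, and it assumes the i2c_register_board_info() API of this kernel generation.

static struct i2c_board_info apf9328_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c02", 0x50),	/* made-up example client */
	},
};

	/* in apf9328_init(), before imx1_add_imx_i2c(&apf9328_i2c_data): */
	i2c_register_board_info(0, apf9328_i2c_devices,
				ARRAY_SIZE(apf9328_i2c_devices));
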
+ 1 - 1
arch/arm/mach-imx/mach-mx31_3ds.c

@@ -492,7 +492,7 @@ static struct mc13xxx_platform_data mc13783_pdata = {
 		.regulators = mx31_3ds_regulators,
 		.regulators = mx31_3ds_regulators,
 		.num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
 		.num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
 	},
 	},
-	.flags  = MC13XXX_USE_TOUCHSCREEN,
+	.flags  = MC13XXX_USE_TOUCHSCREEN | MC13XXX_USE_RTC,
 };
 };
 
 
 /* SPI */
 /* SPI */

+ 1 - 1
arch/arm/mach-mx5/board-mx51_babbage.c

@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
 {
 {
 	iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
 	iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
 	iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
 	iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-		PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+		PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
 
 	imx51_soc_init();
 	imx51_soc_init();
 
 

+ 1 - 1
arch/arm/mach-mx5/board-mx53_evk.c

@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
 	gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 	gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 }
 
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 };
 
 

+ 1 - 1
arch/arm/mach-mx5/board-mx53_loco.c

@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
 	gpio_set_value(LOCO_FEC_PHY_RST, 1);
 	gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 }
 
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 };
 
 

+ 1 - 1
arch/arm/mach-mx5/board-mx53_smd.c

@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
 	gpio_set_value(SMD_FEC_PHY_RST, 1);
 	gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 }
 
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
 	.phy = PHY_INTERFACE_MODE_RMII,
 	.phy = PHY_INTERFACE_MODE_RMII,
 };
 };
 
 

+ 1 - 1
arch/arm/mach-omap2/board-rx51-peripherals.c

@@ -198,7 +198,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 static void __init rx51_charger_init(void)
 {
 {
 	WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
 	WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-		GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+		GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
 
 	platform_device_register(&rx51_charger_device);
 	platform_device_register(&rx51_charger_device);
 }
 }

+ 3 - 3
arch/arm/mach-omap2/mcbsp.c

@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
 		pdata->reg_size = 4;
 		pdata->reg_size = 4;
 		pdata->has_ccr = true;
 		pdata->has_ccr = true;
 	}
 	}
+	pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+	if (id == 1)
+		pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
 
 	if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
 	if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
 		if (id == 2)
 		if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
 					name, oh->name);
 					name, oh->name);
 		return PTR_ERR(pdev);
 		return PTR_ERR(pdev);
 	}
 	}
-	pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-	if (id == 1)
-		pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 	omap_mcbsp_count++;
 	omap_mcbsp_count++;
 	return 0;
 	return 0;
 }
 }

+ 1 - 0
arch/arm/mach-s5pv210/mach-smdkv210.c

@@ -274,6 +274,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
 
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
 	.pwm_id = 3,
 	.pwm_id = 3,
+	.pwm_period_ns = 1000,
 };
 };
 
 
 static void __init smdkv210_map_io(void)
 static void __init smdkv210_map_io(void)

+ 1 - 0
arch/arm/plat-mxc/cpufreq.c

@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  * the CPU clock speed on the fly.
  */
  */
 
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/err.h>

+ 4 - 0
arch/arm/plat-mxc/include/mach/iomux-mx25.h

@@ -265,16 +265,20 @@
 #define MX25_PAD_CSI_D2__CSI_D2		IOMUX_PAD(0x318, 0x120, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D2__CSI_D2		IOMUX_PAD(0x318, 0x120, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D2__UART5_RXD_MUX	IOMUX_PAD(0x318, 0x120, 0x11, 0x578, 1, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D2__UART5_RXD_MUX	IOMUX_PAD(0x318, 0x120, 0x11, 0x578, 1, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D2__GPIO_1_27	IOMUX_PAD(0x318, 0x120, 0x15, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D2__GPIO_1_27	IOMUX_PAD(0x318, 0x120, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D2__CSPI3_MOSI	IOMUX_PAD(0x318, 0x120, 0x17, 0, 0, NO_PAD_CTRL)
 
 
 #define MX25_PAD_CSI_D3__CSI_D3		IOMUX_PAD(0x31c, 0x124, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D3__CSI_D3		IOMUX_PAD(0x31c, 0x124, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D3__GPIO_1_28	IOMUX_PAD(0x31c, 0x124, 0x15, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D3__GPIO_1_28	IOMUX_PAD(0x31c, 0x124, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D3__CSPI3_MISO	IOMUX_PAD(0x31c, 0x124, 0x17, 0x4b4, 1, NO_PAD_CTRL)
 
 
 #define MX25_PAD_CSI_D4__CSI_D4		IOMUX_PAD(0x320, 0x128, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D4__CSI_D4		IOMUX_PAD(0x320, 0x128, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D4__UART5_RTS	IOMUX_PAD(0x320, 0x128, 0x11, 0x574, 1, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D4__UART5_RTS	IOMUX_PAD(0x320, 0x128, 0x11, 0x574, 1, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D4__GPIO_1_29	IOMUX_PAD(0x320, 0x128, 0x15, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D4__GPIO_1_29	IOMUX_PAD(0x320, 0x128, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D4__CSPI3_SCLK	IOMUX_PAD(0x320, 0x128, 0x17, 0, 0, NO_PAD_CTRL)
 
 
 #define MX25_PAD_CSI_D5__CSI_D5		IOMUX_PAD(0x324, 0x12c, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D5__CSI_D5		IOMUX_PAD(0x324, 0x12c, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D5__GPIO_1_30	IOMUX_PAD(0x324, 0x12c, 0x15, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D5__GPIO_1_30	IOMUX_PAD(0x324, 0x12c, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D5__CSPI3_RDY	IOMUX_PAD(0x324, 0x12c, 0x17, 0, 0, NO_PAD_CTRL)
 
 
 #define MX25_PAD_CSI_D6__CSI_D6		IOMUX_PAD(0x328, 0x130, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D6__CSI_D6		IOMUX_PAD(0x328, 0x130, 0x10, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D6__GPIO_1_31	IOMUX_PAD(0x328, 0x130, 0x15, 0, 0, NO_PAD_CTRL)
 #define MX25_PAD_CSI_D6__GPIO_1_31	IOMUX_PAD(0x328, 0x130, 0x15, 0, 0, NO_PAD_CTRL)

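A board that wants CSPI3 on the CSI pins would pull these new pad definitions into its pad list and set them up in one go. A sketch under assumptions (the array name is invented; it relies on the usual iomux-v3 helper):

static iomux_v3_cfg_t example_cspi3_pads[] = {
	MX25_PAD_CSI_D2__CSPI3_MOSI,
	MX25_PAD_CSI_D3__CSPI3_MISO,
	MX25_PAD_CSI_D4__CSPI3_SCLK,
	MX25_PAD_CSI_D5__CSPI3_RDY,
};

	/* in the board's init function: */
	mxc_iomux_v3_setup_multiple_pads(example_cspi3_pads,
					 ARRAY_SIZE(example_cspi3_pads));
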
+ 6 - 1
arch/arm/plat-mxc/pwm.c

@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN			(1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
 		writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
 		writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
 		writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 		writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
 
-		cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+		cr = MX3_PWMCR_PRESCALER(prescale) |
+			MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+			MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
 
 		if (cpu_is_mx25())
 		if (cpu_is_mx25())
 			cr |= MX3_PWMCR_CLKSRC_IPG;
 			cr |= MX3_PWMCR_CLKSRC_IPG;

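For context, the PWMCR prescaler field used above pairs with the 16-bit period register: the requested period in nanoseconds is converted to clock cycles and divided down until it fits in 16 bits. The standalone program below works one example through that arithmetic; the clock rate and the rounding are assumptions, not the driver's exact code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clk_hz = 66500000;		/* assumed ipg_per clock rate */
	uint64_t period_ns = 1000000;		/* 1 ms request -> 1 kHz PWM */

	uint64_t period_cycles = clk_hz * period_ns / 1000000000ULL;
	/* the counter is 16 bits wide, so divide down until the period fits */
	unsigned int prescale = (unsigned int)(period_cycles / 0x10000) + 1;

	period_cycles /= prescale;

	printf("prescale=%u (PWMCR prescaler field %u), PWMPR=%llu\n",
	       prescale, prescale - 1, (unsigned long long)period_cycles);
	return 0;
}
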
+ 0 - 1
arch/arm/plat-samsung/dev-backlight.c

@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 
 #include <plat/devs.h>
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
 #include <plat/gpio-cfg.h>

+ 2 - 4
arch/sparc/kernel/ds.c

@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
 
 
 	dp->rcv_buf_len = 4096;
 	dp->rcv_buf_len = 4096;
 
 
-	dp->ds_states = kzalloc(sizeof(ds_states_template),
-				GFP_KERNEL);
+	dp->ds_states = kmemdup(ds_states_template,
+				sizeof(ds_states_template), GFP_KERNEL);
 	if (!dp->ds_states)
 	if (!dp->ds_states)
 		goto out_free_rcv_buf;
 		goto out_free_rcv_buf;
 
 
-	memcpy(dp->ds_states, ds_states_template,
-	       sizeof(ds_states_template));
 	dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 	dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
 
 	for (i = 0; i < dp->num_ds_states; i++)
 	for (i = 0; i < dp->num_ds_states; i++)

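The same refactor in isolation (a sketch; 'template' is a stand-in name): kmemdup() collapses the allocate-then-copy pair and keeps the copy length from drifting out of sync with the allocation size.

	/* before */
	p = kzalloc(sizeof(template), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memcpy(p, template, sizeof(template));

	/* after */
	p = kmemdup(template, sizeof(template), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
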
+ 1 - 3
arch/sparc/kernel/prom_common.c

@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
 	void *new_val;
 	void *new_val;
 	int err;
 	int err;
 
 
-	new_val = kmalloc(len, GFP_KERNEL);
+	new_val = kmemdup(val, len, GFP_KERNEL);
 	if (!new_val)
 	if (!new_val)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	memcpy(new_val, val, len);
-
 	err = -ENODEV;
 	err = -ENODEV;
 
 
 	mutex_lock(&of_set_property_mutex);
 	mutex_lock(&of_set_property_mutex);

+ 1 - 2
arch/sparc/mm/btfixup.c

@@ -302,8 +302,7 @@ void __init btfixup(void)
 				case 'i':	/* INT */
 				case 'i':	/* INT */
 					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
 					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
 						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
 						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					else if ((insn & 0x80002000) == 0x80002000 &&
-					         (insn & 0x01800000) != 0x01800000) /* %LO */
+					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
 						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
 						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
 					else {
 					else {
 						prom_printf(insn_i, p, addr, insn);
 						prom_printf(insn_i, p, addr, insn);

+ 0 - 8
arch/x86/include/asm/e820.h

@@ -53,13 +53,6 @@
  */
  */
 #define E820_RESERVED_KERN        128
 #define E820_RESERVED_KERN        128
 
 
-/*
- * Address ranges that need to be mapped by the kernel direct
- * mapping. This is used to make sure regions such as
- * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
- */
-#define E820_RESERVED_EFI         129
-
 #ifndef __ASSEMBLY__
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/types.h>
 struct e820entry {
 struct e820entry {
@@ -122,7 +115,6 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 }
 #endif
 #endif
 
 
-extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);

+ 5 - 0
arch/x86/include/asm/efi.h

@@ -33,6 +33,8 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
 
+#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)
+
 #else /* !CONFIG_X86_32 */
 #else /* !CONFIG_X86_32 */
 
 
 extern u64 efi_call0(void *fp);
 extern u64 efi_call0(void *fp);
@@ -82,6 +84,9 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+				 u32 type);
+
 #endif /* CONFIG_X86_32 */
 #endif /* CONFIG_X86_32 */
 
 
 extern int add_efi_memmap;
 extern int add_efi_memmap;

+ 1 - 2
arch/x86/kernel/e820.c

@@ -135,7 +135,6 @@ static void __init e820_print_type(u32 type)
 		printk(KERN_CONT "(usable)");
 		printk(KERN_CONT "(usable)");
 		break;
 		break;
 	case E820_RESERVED:
 	case E820_RESERVED:
-	case E820_RESERVED_EFI:
 		printk(KERN_CONT "(reserved)");
 		printk(KERN_CONT "(reserved)");
 		break;
 		break;
 	case E820_ACPI:
 	case E820_ACPI:
@@ -784,7 +783,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 /*
 /*
  * Find the highest page frame number we have available
  * Find the highest page frame number we have available
  */
  */
-unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 {
 {
 	int i;
 	int i;
 	unsigned long last_pfn = 0;
 	unsigned long last_pfn = 0;

+ 1 - 20
arch/x86/kernel/setup.c

@@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow);
 
 
 void __init setup_arch(char **cmdline_p)
 void __init setup_arch(char **cmdline_p)
 {
 {
-	unsigned long end_pfn;
-
 #ifdef CONFIG_X86_32
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
 	visws_early_detect();
@@ -934,24 +932,7 @@ void __init setup_arch(char **cmdline_p)
 	init_gbpages();
 	init_gbpages();
 
 
 	/* max_pfn_mapped is updated here */
 	/* max_pfn_mapped is updated here */
-	end_pfn = max_low_pfn;
-
-#ifdef CONFIG_X86_64
-	/*
-	 * There may be regions after the last E820_RAM region that we
-	 * want to include in the kernel direct mapping, such as
-	 * EFI_RUNTIME_SERVICES_DATA.
-	 */
-	if (efi_enabled) {
-		unsigned long efi_end;
-
-		efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
-		if (efi_end > max_low_pfn)
-			end_pfn = efi_end;
-	}
-#endif
-
-	max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
 	max_pfn_mapped = max_low_pfn_mapped;
 
 
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_X86_64

+ 11 - 18
arch/x86/platform/efi/efi.c

@@ -323,13 +323,10 @@ static void __init do_add_efi_memmap(void)
 		case EFI_UNUSABLE_MEMORY:
 		case EFI_UNUSABLE_MEMORY:
 			e820_type = E820_UNUSABLE;
 			e820_type = E820_UNUSABLE;
 			break;
 			break;
-		case EFI_RUNTIME_SERVICES_DATA:
-			e820_type = E820_RESERVED_EFI;
-			break;
 		default:
 		default:
 			/*
 			/*
 			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
 			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
-			 * EFI_MEMORY_MAPPED_IO
+			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
 			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
 			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
 			 */
 			 */
 			e820_type = E820_RESERVED;
 			e820_type = E820_RESERVED;
@@ -674,21 +671,10 @@ void __init efi_enter_virtual_mode(void)
 		end_pfn = PFN_UP(end);
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped)) {
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
 			va = __va(md->phys_addr);
-
-			if (!(md->attribute & EFI_MEMORY_WB)) {
-				addr = (u64) (unsigned long)va;
-				npages = md->num_pages;
-				memrange_efi_to_native(&addr, &npages);
-				set_memory_uc(addr, npages);
-			}
-		} else {
-			if (!(md->attribute & EFI_MEMORY_WB))
-				va = ioremap_nocache(md->phys_addr, size);
-			else
-				va = ioremap_cache(md->phys_addr, size);
-		}
+		else
+			va = efi_ioremap(md->phys_addr, size, md->type);
 
 
 		md->virt_addr = (u64) (unsigned long) va;
 		md->virt_addr = (u64) (unsigned long) va;
 
 
@@ -698,6 +684,13 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 			continue;
 		}
 		}
 
 
+		if (!(md->attribute & EFI_MEMORY_WB)) {
+			addr = md->virt_addr;
+			npages = md->num_pages;
+			memrange_efi_to_native(&addr, &npages);
+			set_memory_uc(addr, npages);
+		}
+
 		systab = (u64) (unsigned long) efi_phys.systab;
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
 			systab += md->virt_addr - md->phys_addr;

+ 2 - 46
arch/x86/platform/efi/efi_32.c

@@ -39,43 +39,14 @@
  */
  */
 
 
 static unsigned long efi_rt_eflags;
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 
 void efi_call_phys_prelog(void)
 void efi_call_phys_prelog(void)
 {
 {
-	unsigned long cr4;
-	unsigned long temp;
 	struct desc_ptr gdt_descr;
 	struct desc_ptr gdt_descr;
 
 
 	local_irq_save(efi_rt_eflags);
 	local_irq_save(efi_rt_eflags);
 
 
-	/*
-	 * If I don't have PAE, I should just duplicate two entries in page
-	 * directory. If I have PAE, I just need to duplicate one entry in
-	 * page directory.
-	 */
-	cr4 = read_cr4_safe();
-
-	if (cr4 & X86_CR4_PAE) {
-		efi_bak_pg_dir_pointer[0].pgd =
-		    swapper_pg_dir[pgd_index(0)].pgd;
-		swapper_pg_dir[0].pgd =
-		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-	} else {
-		efi_bak_pg_dir_pointer[0].pgd =
-		    swapper_pg_dir[pgd_index(0)].pgd;
-		efi_bak_pg_dir_pointer[1].pgd =
-		    swapper_pg_dir[pgd_index(0x400000)].pgd;
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-		temp = PAGE_OFFSET + 0x400000;
-		swapper_pg_dir[pgd_index(0x400000)].pgd =
-		    swapper_pg_dir[pgd_index(temp)].pgd;
-	}
-
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
+	load_cr3(initial_page_table);
 	__flush_tlb_all();
 	__flush_tlb_all();
 
 
 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
 
 
 void efi_call_phys_epilog(void)
 void efi_call_phys_epilog(void)
 {
 {
-	unsigned long cr4;
 	struct desc_ptr gdt_descr;
 	struct desc_ptr gdt_descr;
 
 
 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
 	gdt_descr.size = GDT_SIZE - 1;
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 	load_gdt(&gdt_descr);
 
 
-	cr4 = read_cr4_safe();
-
-	if (cr4 & X86_CR4_PAE) {
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    efi_bak_pg_dir_pointer[0].pgd;
-	} else {
-		swapper_pg_dir[pgd_index(0)].pgd =
-		    efi_bak_pg_dir_pointer[0].pgd;
-		swapper_pg_dir[pgd_index(0x400000)].pgd =
-		    efi_bak_pg_dir_pointer[1].pgd;
-	}
-
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
+	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 	__flush_tlb_all();
 
 
 	local_irq_restore(efi_rt_eflags);
 	local_irq_restore(efi_rt_eflags);

+ 17 - 0
arch/x86/platform/efi/efi_64.c

@@ -80,3 +80,20 @@ void __init efi_call_phys_epilog(void)
 	local_irq_restore(efi_flags);
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
 	early_code_mapping_set_exec(0);
 }
 }
+
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+				 u32 type)
+{
+	unsigned long last_map_pfn;
+
+	if (type == EFI_MEMORY_MAPPED_IO)
+		return ioremap(phys_addr, size);
+
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
+
+	return (void __iomem *)__va(phys_addr);
+}

+ 15 - 3
arch/x86/xen/setup.c

@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
 	domid_t domid = DOMID_SELF;
 	domid_t domid = DOMID_SELF;
 	int ret;
 	int ret;
 
 
-	ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-	if (ret > 0)
-		max_pages = ret;
+	/*
+	 * For the initial domain we use the maximum reservation as
+	 * the maximum page.
+	 *
+	 * For guest domains the current maximum reservation reflects
+	 * the current maximum rather than the static maximum. In this
+	 * case the e820 map provided to us will cover the static
+	 * maximum region.
+	 */
+	if (xen_initial_domain()) {
+		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+		if (ret > 0)
+			max_pages = ret;
+	}
+
 	return min(max_pages, MAX_DOMAIN_PAGES);
 	return min(max_pages, MAX_DOMAIN_PAGES);
 }
 }
 
 

+ 11 - 12
block/blk-core.c

@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (drain_all)
 		if (drain_all)
 			blk_throtl_drain(q);
 			blk_throtl_drain(q);
 
 
-		__blk_run_queue(q);
+		/*
+		 * This function might be called on a queue which failed
+		 * driver init after queue creation.  Some drivers
+		 * (e.g. fd) get unhappy in such cases.  Kick queue iff
+		 * dispatch queue has something on it.
+		 */
+		if (!list_empty(&q->queue_head))
+			__blk_run_queue(q);
 
 
 		if (drain_all)
 		if (drain_all)
 			nr_rqs = q->rq.count[0] + q->rq.count[1];
 			nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	q->backing_dev_info.name = "block";
 	q->backing_dev_info.name = "block";
+	q->node = node_id;
 
 
 	err = bdi_init(&q->backing_dev_info);
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
 	if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 	if (!uninit_q)
 		return NULL;
 		return NULL;
 
 
-	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
 	if (!q)
 		blk_cleanup_queue(uninit_q);
 		blk_cleanup_queue(uninit_q);
 
 
@@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 			 spinlock_t *lock)
 			 spinlock_t *lock)
-{
-	return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-			      spinlock_t *lock, int node_id)
 {
 {
 	if (!q)
 	if (!q)
 		return NULL;
 		return NULL;
 
 
-	q->node = node_id;
 	if (blk_init_free_list(q))
 	if (blk_init_free_list(q))
 		return NULL;
 		return NULL;
 
 
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 
 	return NULL;
 	return NULL;
 }
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 
 int blk_get_queue(struct request_queue *q)
 int blk_get_queue(struct request_queue *q)
 {
 {

+ 14 - 2
block/cfq-iosched.c

@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 		}
 		}
 	}
 	}
 
 
-	if (ret)
+	if (ret && ret != -EEXIST)
 		printk(KERN_ERR "cfq: cic link failed!\n");
 		printk(KERN_ERR "cfq: cic link failed!\n");
 
 
 	return ret;
 	return ret;
@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 {
 	struct io_context *ioc = NULL;
 	struct io_context *ioc = NULL;
 	struct cfq_io_context *cic;
 	struct cfq_io_context *cic;
+	int ret;
 
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 
@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (!ioc)
 	if (!ioc)
 		return NULL;
 		return NULL;
 
 
+retry:
 	cic = cfq_cic_lookup(cfqd, ioc);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	if (cic)
 	if (cic)
 		goto out;
 		goto out;
@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (cic == NULL)
 	if (cic == NULL)
 		goto err;
 		goto err;
 
 
-	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+	ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+	if (ret == -EEXIST) {
+		/* someone has linked cic to ioc already */
+		cfq_cic_free(cic);
+		goto retry;
+	} else if (ret)
 		goto err_free;
 		goto err_free;
 
 
 out:
 out:
@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
 
 	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
 	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
 		kfree(cfqg);
 		kfree(cfqg);
+
+		spin_lock(&cic_index_lock);
+		ida_remove(&cic_index_ida, cfqd->cic_index);
+		spin_unlock(&cic_index_lock);
+
 		kfree(cfqd);
 		kfree(cfqd);
 		return NULL;
 		return NULL;
 	}
 	}

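The shape of the cic-link fix, reduced to a self-contained toy program: when linking a freshly allocated object can race with another context, -EEXIST means the other side won, so the loser frees its copy and retries the lookup instead of failing the request. The names are generic stand-ins, not cfq's, and the race is simulated.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *linked;			/* simulates the shared lookup slot */
static int simulate_race = 1;		/* make the first link attempt lose */

static void *lookup(void)
{
	return linked;
}

static int link_obj(void *obj)
{
	if (simulate_race) {		/* pretend another context linked first */
		simulate_race = 0;
		linked = malloc(1);
		return -EEXIST;
	}
	linked = obj;
	return 0;
}

static void *get_obj(void)
{
	void *obj;
	int ret;

retry:
	obj = lookup();
	if (obj)
		return obj;

	obj = malloc(1);
	if (!obj)
		return NULL;

	ret = link_obj(obj);
	if (ret == -EEXIST) {
		free(obj);		/* drop our copy, reuse the winner's */
		goto retry;
	} else if (ret) {
		free(obj);
		return NULL;
	}
	return obj;
}

int main(void)
{
	printf("got object %p after a simulated racing link\n", get_obj());
	return 0;
}
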
+ 4 - 2
drivers/block/cciss.c

@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
 			c->Request.Timeout = 0;
 			c->Request.Timeout = 0;
 			c->Request.CDB[0] = BMIC_WRITE;
 			c->Request.CDB[0] = BMIC_WRITE;
 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			c->Request.CDB[7] = (size >> 8) & 0xFF;
+			c->Request.CDB[8] = size & 0xFF;
 			break;
 			break;
 		case TEST_UNIT_READY:
 		case TEST_UNIT_READY:
 			c->Request.CDBLen = 6;
 			c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
 {
 	if (h->msix_vector || h->msi_vector) {
 	if (h->msix_vector || h->msi_vector) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h))
+				0, h->devname, h))
 			return 0;
 			return 0;
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
 			" for %s\n", h->intr[h->intr_mode],
 			" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 	}
 	}
 
 
 	if (!request_irq(h->intr[h->intr_mode], intxhandler,
 	if (!request_irq(h->intr[h->intr_mode], intxhandler,
-			IRQF_DISABLED, h->devname, h))
+			IRQF_SHARED, h->devname, h))
 		return 0;
 		return 0;
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
 		h->intr[h->intr_mode], h->devname);
 		h->intr[h->intr_mode], h->devname);

+ 2 - 2
drivers/block/loop.c

@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
 
 		/*
 		/*
 		 * We use punch hole to reclaim the free space used by the
 		 * We use punch hole to reclaim the free space used by the
-		 * image a.k.a. discard. However we do support discard if
+		 * image a.k.a. discard. However we do not support discard if
 		 * encryption is enabled, because it may give an attacker
 		 * encryption is enabled, because it may give an attacker
 		 * useful information.
 		 * useful information.
 		 */
 		 */
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
 	}
 	}
 
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = inode->i_sb->s_blocksize;
+	q->limits.discard_alignment = 0;
 	q->limits.max_discard_sectors = UINT_MAX >> 9;
 	q->limits.max_discard_sectors = UINT_MAX >> 9;
 	q->limits.discard_zeroes_data = 1;
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

+ 10 - 91
drivers/block/rbd.c

@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);      /* clients */
 
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
 static ssize_t rbd_snap_add(struct device *dev,
 			    struct device_attribute *attr,
 			    struct device_attribute *attr,
 			    const char *buf,
 			    const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	int ret = -ENOMEM;
 	int ret = -ENOMEM;
 
 
+	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+		return -ENXIO;
+	}
+
 	init_rwsem(&header->snap_rwsem);
 	init_rwsem(&header->snap_rwsem);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
 	return ret;
 	return ret;
 }
 }
 
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-				     u64 snapid,
-				     const char *obj)
-{
-	struct ceph_osd_req_op *ops;
-	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-	if (ret < 0)
-		return ret;
-
-	ops[0].snap.snapid = snapid;
-
-	ret = rbd_req_sync_op(dev, NULL,
-			       CEPH_NOSNAP,
-			       0,
-			       CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-			       ops,
-			       1, obj, 0, 0, NULL, NULL, NULL);
-
-	rbd_destroy_ops(ops);
-
-	return ret;
-}
-
 /*
 /*
  * Request sync osd read
  * Request sync osd read
  */
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 			goto out_dh;
 			goto out_dh;
 
 
 		rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
 		rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-		if (rc < 0)
+		if (rc < 0) {
+			if (rc == -ENXIO) {
+				pr_warning("unrecognized header format"
+					   " for image %s", rbd_dev->obj);
+			}
 			goto out_dh;
 			goto out_dh;
+		}
 
 
 		if (snap_count != header->total_snaps) {
 		if (snap_count != header->total_snaps) {
 			snap_count = header->total_snaps;
 			snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 
 static struct attribute *rbd_attrs[] = {
 static struct attribute *rbd_attrs[] = {
 	&dev_attr_size.attr,
 	&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
 	&dev_attr_current_snap.attr,
 	&dev_attr_current_snap.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_create_snap.attr,
 	&dev_attr_create_snap.attr,
-	&dev_attr_rollback_snap.attr,
 	NULL
 	NULL
 };
 };
 
 
@@ -2424,64 +2401,6 @@ err_unlock:
 	return ret;
 	return ret;
 }
 }
 
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t count)
-{
-	struct rbd_device *rbd_dev = dev_to_rbd(dev);
-	int ret;
-	u64 snapid;
-	u64 cur_ofs;
-	char *seg_name = NULL;
-	char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!snap_name)
-		return ret;
-
-	/* parse snaps add command */
-	snprintf(snap_name, count, "%s", buf);
-	seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-	if (!seg_name)
-		goto done;
-
-	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-	ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-	if (ret < 0)
-		goto done_unlock;
-
-	dout("snapid=%lld\n", snapid);
-
-	cur_ofs = 0;
-	while (cur_ofs < rbd_dev->header.image_size) {
-		cur_ofs += rbd_get_segment(&rbd_dev->header,
-					   rbd_dev->obj,
-					   cur_ofs, (u64)-1,
-					   seg_name, NULL);
-		dout("seg_name=%s\n", seg_name);
-
-		ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-		if (ret < 0)
-			pr_warning("could not roll back obj %s err=%d\n",
-				   seg_name, ret);
-	}
-
-	ret = __rbd_update_snaps(rbd_dev);
-	if (ret < 0)
-		goto done_unlock;
-
-	ret = count;
-
-done_unlock:
-	mutex_unlock(&ctl_mutex);
-done:
-	kfree(seg_name);
-	kfree(snap_name);
-
-	return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
 static struct bus_attribute rbd_bus_attrs[] = {
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
 	__ATTR(remove, S_IWUSR, NULL, rbd_remove),

+ 215 - 147
drivers/block/swim3.c

@@ -16,6 +16,8 @@
  * handle GCR disks
  * handle GCR disks
  */
  */
 
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched.h>
@@ -36,13 +38,11 @@
 #include <asm/machdep.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/pmac_feature.h>
 
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES	2
 #define MAX_FLOPPIES	2
 
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
 enum swim_state {
 	idle,
 	idle,
 	locating,
 	locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 
 struct floppy_state {
 struct floppy_state {
 	enum swim_state	state;
 	enum swim_state	state;
-	spinlock_t lock;
 	struct swim3 __iomem *swim3;	/* hardware registers */
 	struct swim3 __iomem *swim3;	/* hardware registers */
 	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
 	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
 	int	swim3_intr;	/* interrupt number for SWIM3 */
 	int	swim3_intr;	/* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
 	int	wanted;
 	int	wanted;
 	struct macio_dev *mdev;
 	struct macio_dev *mdev;
 	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
 	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+	int	index;
+	struct request *cur_req;
 };
 };
 
 
+#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...)	do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
 	0, 0, 0, 0, 0, 0
 };

-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-			void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);

-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-	if (__blk_end_request(fd_req, err, nr_bytes))
-		return true;
+	struct request *req = fs->cur_req;
+	int rc;

-	fd_req = NULL;
-	return false;
-}
+	swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+		  err, nr_bytes, req);

-static bool swim3_end_request_cur(int err)
-{
-	return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+	if (err)
+		nr_bytes = blk_rq_cur_bytes(req);
+	rc = __blk_end_request(req, err, nr_bytes);
+	if (rc)
+		return true;
+	fs->cur_req = NULL;
+	return false;
 }

 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
 	return (stat & DATA) == 0;
 }

-static void do_fd_request(struct request_queue * q)
-{
-	int i;
-
-	for(i=0; i<floppy_count; i++) {
-		struct floppy_state *fs = &floppy_states[i];
-		if (fs->mdev->media_bay &&
-		    check_media_bay(fs->mdev->media_bay) != MB_FD)
-			continue;
-		start_request(fs);
-	}
-}
-
 static void start_request(struct floppy_state *fs)
 {
 	struct request *req;
 	unsigned long x;

+	swim3_dbg("start request, initial state=%d\n", fs->state);
+
 	if (fs->state == idle && fs->wanted) {
 		fs->state = available;
 		wake_up(&fs->wait);
 		return;
 	}
 	while (fs->state == idle) {
-		if (!fd_req) {
-			fd_req = blk_fetch_request(swim3_queue);
-			if (!fd_req)
+		swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+		if (!fs->cur_req) {
+			fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+			swim3_dbg("  fetched request %p\n", fs->cur_req);
+			if (!fs->cur_req)
 				break;
 		}
-		req = fd_req;
-#if 0
-		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-		       req->rq_disk->disk_name, req->cmd,
-		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-		printk("           errors=%d current_nr_sectors=%u\n",
-		       req->errors, blk_rq_cur_sectors(req));
+		req = fs->cur_req;
+
+		if (fs->mdev->media_bay &&
+		    check_media_bay(fs->mdev->media_bay) != MB_FD) {
+			swim3_dbg("%s", "  media bay absent, dropping req\n");
+			swim3_end_request(fs, -ENODEV, 0);
+			continue;
+		}
+
+#if 0 /* This is really too verbose */
+		swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+			  req->rq_disk->disk_name, req->cmd,
+			  (long)blk_rq_pos(req), blk_rq_sectors(req),
+			  req->buffer);
+		swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+			  req->errors, blk_rq_cur_sectors(req));
 #endif

 		if (blk_rq_pos(req) >= fs->total_secs) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+				  (long)blk_rq_pos(req), (long)fs->total_secs);
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}
 		if (fs->ejected) {
-			swim3_end_request_cur(-EIO);
+			swim3_dbg("%s", "  disk ejected\n");
+			swim3_end_request(fs, -EIO, 0);
 			continue;
 		}

@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
 			if (fs->write_prot < 0)
 				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
 			if (fs->write_prot) {
-				swim3_end_request_cur(-EIO);
+				swim3_dbg("%s", "  try to write, disk write protected\n");
+				swim3_end_request(fs, -EIO, 0);
 				continue;
 			}
 		}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
 		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
 		fs->head = x / fs->secpertrack;
 		fs->req_sector = x % fs->secpertrack + 1;
-		fd_req = req;
 		fs->state = do_transfer;
 		fs->retries = 0;

@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
 	}
 }

+static void do_fd_request(struct request_queue * q)
+{
+	start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
 			void (*proc)(unsigned long))
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&fs->lock, flags);
 	if (fs->timeout_pending)
 		del_timer(&fs->timeout);
 	fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
 	fs->timeout.data = (unsigned long) fs;
 	add_timer(&fs->timeout);
 	fs->timeout_pending = 1;
-	spin_unlock_irqrestore(&fs->lock, flags);
 }

 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_cmd *cp = fs->dma_cmd;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	struct request *req = fs->cur_req;

-	if (blk_rq_cur_sectors(fd_req) <= 0) {
-		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+	if (blk_rq_cur_sectors(req) <= 0) {
+		swim3_warn("%s", "Transfer 0 sectors ?\n");
 		return;
 	}
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		n = 1;
 	else {
 		n = fs->secpertrack - fs->req_sector + 1;
-		if (n > blk_rq_cur_sectors(fd_req))
-			n = blk_rq_cur_sectors(fd_req);
+		if (n > blk_rq_cur_sectors(req))
+			n = blk_rq_cur_sectors(req);
 	}
+
+	swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+		  fs->req_sector, fs->secpertrack, fs->head, n);
+
 	fs->scount = n;
 	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
 	out_8(&sw->sector, fs->req_sector);
 	out_8(&sw->nsect, n);
 	out_8(&sw->gap3, 0);
 	out_le32(&dr->cmdptr, virt_to_bus(cp));
-	if (rq_data_dir(fd_req) == WRITE) {
+	if (rq_data_dir(req) == WRITE) {
 		/* Set up 3 dma commands: write preamble, data, postamble */
 		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
 		++cp;
-		init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+		init_dma(cp, OUTPUT_MORE, req->buffer, 512);
 		++cp;
 		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
 	} else {
-		init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+		init_dma(cp, INPUT_LAST, req->buffer, n * 512);
 	}
 	++cp;
 	out_le16(&cp->command, DBDMA_STOP);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	in_8(&sw->error);
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-	if (rq_data_dir(fd_req) == WRITE)
+	if (rq_data_dir(req) == WRITE)
 		out_8(&sw->control_bis, WRITE_SECTORS);
 	in_8(&sw->intr);
 	out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
 	for (;;) {
+		swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+			  fs->state, fs->req_cyl, fs->cur_cyl);
+
 		switch (fs->state) {
 		case idle:
 			return;		/* XXX shouldn't get here */

 		case locating:
 			if (swim3_readbit(fs, TRACK_ZERO)) {
+				swim3_dbg("%s", "    locate track 0\n");
 				fs->cur_cyl = 0;
 				if (fs->req_cyl == 0)
 					fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
 				break;
 			}
 			if (fs->req_cyl == fs->cur_cyl) {
-				printk("whoops, seeking 0\n");
+				swim3_warn("%s", "Whoops, seeking 0\n");
 				fs->state = do_transfer;
 				break;
 			}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
 		case do_transfer:
 			if (fs->cur_cyl != fs->req_cyl) {
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+						  fs->req_cyl, fs->cur_cyl);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					return;
 				}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
 			return;

 		default:
-			printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+			swim3_err("Unknown state %d\n", fs->state);
 			return;
 		}
 	}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* scan timeout, state=%d\n", fs->state);

+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
 	fs->cur_cyl = -1;
 	if (fs->retries > 5) {
-		swim3_end_request_cur(-EIO);
+		swim3_end_request(fs, -EIO, 0);
 		fs->state = idle;
 		start_request(fs);
 	} else {
 		fs->state = jogging;
 		act(fs);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }

 static void seek_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* seek timeout, state=%d\n", fs->state);

+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_8(&sw->control_bic, DO_SEEK);
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
-	printk(KERN_ERR "swim3: seek timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }

 static void settle_timeout(unsigned long data)
 {
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
+	unsigned long flags;
+
+	swim3_dbg("* settle timeout, state=%d\n", fs->state);

+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	if (swim3_readbit(fs, SEEK_COMPLETE)) {
 		out_8(&sw->select, RELAX);
 		fs->state = locating;
 		act(fs);
-		return;
+		goto unlock;
 	}
 	out_8(&sw->select, RELAX);
 	if (fs->settle_time < 2*HZ) {
 		++fs->settle_time;
 		set_timeout(fs, 1, settle_timeout);
-		return;
+		goto unlock;
 	}
-	printk(KERN_ERR "swim3: seek settle timeout\n");
-	swim3_end_request_cur(-EIO);
+	swim3_err("%s", "Seek settle timeout\n");
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+ unlock:
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }

 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_regs __iomem *dr = fs->dma;
+	unsigned long flags;
 	int n;

+	swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->timeout_pending = 0;
 	out_le32(&dr->control, RUN << 16);
 	/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
 	out_8(&sw->select, RELAX);
-	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-	       (long)blk_rq_pos(fd_req));
-	swim3_end_request_cur(-EIO);
+	swim3_err("Timeout %sing sector %ld\n",
+	       (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+	       (long)blk_rq_pos(fs->cur_req));
+	swim3_end_request(fs, -EIO, 0);
 	fs->state = idle;
 	start_request(fs);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }

 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 	int stat, resid;
 	struct dbdma_regs __iomem *dr;
 	struct dbdma_cmd *cp;
+	unsigned long flags;
+	struct request *req = fs->cur_req;
+
+	swim3_dbg("* interrupt, state=%d\n", fs->state);

+	spin_lock_irqsave(&swim3_lock, flags);
 	intr = in_8(&sw->intr);
 	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
 	if ((intr & ERROR_INTR) && fs->state != do_transfer)
-		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-		       fs->state, rq_data_dir(fd_req), intr, err);
+		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+			  fs->state, rq_data_dir(req), intr, err);
 	switch (fs->state) {
 	case locating:
 		if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			del_timer(&fs->timeout);
 			fs->timeout_pending = 0;
 			if (sw->ctrack == 0xff) {
-				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+				swim3_err("%s", "Seen sector but cyl=ff?\n");
 				fs->cur_cyl = -1;
 				if (fs->retries > 5) {
-					swim3_end_request_cur(-EIO);
+					swim3_end_request(fs, -EIO, 0);
 					fs->state = idle;
 					start_request(fs);
 				} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			fs->cur_cyl = sw->ctrack;
 			fs->cur_sector = sw->csect;
 			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-				       fs->expect_cyl, fs->cur_cyl);
+				swim3_err("Expected cyl %d, got %d\n",
+					  fs->expect_cyl, fs->cur_cyl);
 			fs->state = do_transfer;
 			act(fs);
 		}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		fs->timeout_pending = 0;
 		dr = fs->dma;
 		cp = fs->dma_cmd;
-		if (rq_data_dir(fd_req) == WRITE)
+		if (rq_data_dir(req) == WRITE)
 			++cp;
 		/*
 		 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		if (intr & ERROR_INTR) {
 			n = fs->scount - 1 - resid / 512;
 			if (n > 0) {
-				blk_update_request(fd_req, 0, n << 9);
+				blk_update_request(req, 0, n << 9);
 				fs->req_sector += n;
 			}
 			if (fs->retries < 5) {
 				++fs->retries;
 				act(fs);
 			} else {
-				printk("swim3: error %sing block %ld (err=%x)\n",
-				       rq_data_dir(fd_req) == WRITE? "writ": "read",
-				       (long)blk_rq_pos(fd_req), err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("Error %sing block %ld (err=%x)\n",
+				       rq_data_dir(req) == WRITE? "writ": "read",
+				       (long)blk_rq_pos(req), err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 			}
 		} else {
 			if ((stat & ACTIVE) == 0 || resid != 0) {
 				/* musta been an error */
-				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-				       fs->state, rq_data_dir(fd_req), intr, err);
-				swim3_end_request_cur(-EIO);
+				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+					  fs->state, rq_data_dir(req), intr, err);
+				swim3_end_request(fs, -EIO, 0);
 				fs->state = idle;
 				start_request(fs);
 				break;
 			}
-			if (swim3_end_request(0, fs->scount << 9)) {
+			fs->retries = 0;
+			if (swim3_end_request(fs, 0, fs->scount << 9)) {
 				fs->req_sector += fs->scount;
 				if (fs->req_sector > fs->secpertrack) {
 					fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			start_request(fs);
 		break;
 	default:
-		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+		swim3_err("Don't know what to do in state %d\n", fs->state);
 	}
+	spin_unlock_irqrestore(&swim3_lock, flags);
 	return IRQ_HANDLED;
 }

@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */

+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
 		      int interruptible)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&fs->lock, flags);
-	if (fs->state != idle) {
+	swim3_dbg("%s", "-> grab drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
+	if (fs->state != idle && fs->state != available) {
 		++fs->wanted;
 		while (fs->state != available) {
+			spin_unlock_irqrestore(&swim3_lock, flags);
 			if (interruptible && signal_pending(current)) {
 				--fs->wanted;
-				spin_unlock_irqrestore(&fs->lock, flags);
 				return -EINTR;
 			}
 			interruptible_sleep_on(&fs->wait);
+			spin_lock_irqsave(&swim3_lock, flags);
 		}
 		--fs->wanted;
 	}
 	fs->state = state;
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
+
 	return 0;
 }

@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&fs->lock, flags);
+	swim3_dbg("%s", "-> release drive\n");
+
+	spin_lock_irqsave(&swim3_lock, flags);
 	fs->state = idle;
 	start_request(fs);
-	spin_unlock_irqrestore(&fs->lock, flags);
+	spin_unlock_irqrestore(&swim3_lock, flags);
 }

 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct floppy_state *fs = disk->private_data;
 	struct swim3 __iomem *sw = fs->swim3;
+
 	mutex_lock(&swim3_mutex);
 	if (fs->ref_count > 0 && --fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
 	.revalidate_disk= floppy_revalidate,
 };

+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+	struct floppy_state *fs = macio_get_drvdata(mdev);
+	struct swim3 __iomem *sw = fs->swim3;
+
+	if (!fs)
+		return;
+	if (mb_state != MB_FD)
+		return;
+
+	/* Clear state */
+	out_8(&sw->intr_enable, 0);
+	in_8(&sw->intr);
+	in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
 	struct device_node *swim = mdev->ofdev.dev.of_node;
 	struct floppy_state *fs = &floppy_states[index];
 	int rc = -EBUSY;

+	/* Do this first for message macros */
+	memset(fs, 0, sizeof(*fs));
+	fs->mdev = mdev;
+	fs->index = index;
+
 	/* Check & Request resources */
 	if (macio_resource_count(mdev) < 2) {
-		printk(KERN_WARNING "ifd%d: no address for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "No address in device-tree\n");
 		return -ENXIO;
 	}
-	if (macio_irq_count(mdev) < 2) {
-		printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-			index, swim->full_name);
+	if (macio_irq_count(mdev) < 1) {
+		swim3_err("%s", "No interrupt in device-tree\n");
+		return -ENXIO;
 	}
 	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-		printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request mmio resource\n");
 		return -EBUSY;
 	}
 	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-		printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Can't request dma resource\n");
 		macio_release_resource(mdev, 0);
 		return -EBUSY;
 	}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 	if (mdev->media_bay == NULL)
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
 	
-	memset(fs, 0, sizeof(*fs));
-	spin_lock_init(&fs->lock);
 	fs->state = idle;
 	fs->swim3 = (struct swim3 __iomem *)
 		ioremap(macio_resource_start(mdev, 0), 0x200);
 	if (fs->swim3 == NULL) {
-		printk("fd%d: couldn't map registers for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map mmio registers\n");
 		rc = -ENOMEM;
 		goto out_release;
 	}
 	fs->dma = (struct dbdma_regs __iomem *)
 		ioremap(macio_resource_start(mdev, 1), 0x200);
 	if (fs->dma == NULL) {
-		printk("fd%d: couldn't map DMA for %s\n",
-		       index, swim->full_name);
+		swim3_err("%s", "Couldn't map dma registers\n");
 		iounmap(fs->swim3);
 		rc = -ENOMEM;
 		goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 	fs->secpercyl = 36;
 	fs->secpertrack = 18;
 	fs->total_secs = 2880;
-	fs->mdev = mdev;
 	init_waitqueue_head(&fs->wait);

 	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
 	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
 	st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);

+	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+		swim3_mb_event(mdev, MB_FD);
+
 	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-		printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-		       index, fs->swim3_intr, swim->full_name);
+		swim3_err("%s", "Couldn't request interrupt\n");
 		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
 		goto out_unmap;
 		return -EBUSY;
 	}
-/*
-	if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-		printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-		       fs->dma_intr);
-		return -EBUSY;
-	}
-*/

 	init_timer(&fs->timeout);

-	printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+	swim3_info("SWIM3 floppy controller %s\n",
 		mdev->media_bay ? "in media bay" : "");

 	return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)

 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-	int i, rc;
 	struct gendisk *disk;
+	int index, rc;
+
+	index = floppy_count++;
+	if (index >= MAX_FLOPPIES)
+		return -ENXIO;

 	/* Add the drive */
-	rc = swim3_add_device(mdev, floppy_count);
+	rc = swim3_add_device(mdev, index);
 	if (rc)
 		return rc;
+	/* Now register that disk. Same comment about failure handling */
+	disk = disks[index] = alloc_disk(1);
+	if (disk == NULL)
+		return -ENOMEM;
+	disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+	if (disk->queue == NULL) {
+		put_disk(disk);
+		return -ENOMEM;
+	}
+	disk->queue->queuedata = &floppy_states[index];

-	/* Now create the queue if not there yet */
-	if (swim3_queue == NULL) {
+	if (index == 0) {
 		/* If we failed, there isn't much we can do as the driver is still
 		 * too dumb to remove the device, just bail out
 		 */
 		if (register_blkdev(FLOPPY_MAJOR, "fd"))
 			return 0;
-		swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-		if (swim3_queue == NULL) {
-			unregister_blkdev(FLOPPY_MAJOR, "fd");
-			return 0;
-		}
 	}
 	}

-	i = floppy_count++;
-	disk = disks[i] = alloc_disk(1);
-	if (disk == NULL)
-		return 0;
-
 	disk->major = FLOPPY_MAJOR;
-	disk->first_minor = i;
+	disk->first_minor = index;
 	disk->fops = &floppy_fops;
-	disk->private_data = &floppy_states[i];
-	disk->queue = swim3_queue;
+	disk->private_data = &floppy_states[index];
 	disk->flags |= GENHD_FL_REMOVABLE;
-	sprintf(disk->disk_name, "fd%d", i);
+	sprintf(disk->disk_name, "fd%d", index);
 	set_capacity(disk, 2880);
 	add_disk(disk);

@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
 		.of_match_table	= swim3_match,
 	},
 	.probe		= swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+	.mediabay_event	= swim3_mb_event,
+#endif
 #if 0
 	.suspend	= swim3_suspend,
 	.resume		= swim3_resume,

+ 3 - 3
drivers/bluetooth/Kconfig

@@ -188,7 +188,7 @@ config BT_MRVL
 	  The core driver to support Marvell Bluetooth devices.

 	  This driver is required if you want to support
-	  Marvell Bluetooth devices, such as 8688/8787.
+	  Marvell Bluetooth devices, such as 8688/8787/8797.

 	  Say Y here to compile Marvell Bluetooth driver
 	  into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
 	  The driver for Marvell Bluetooth chipsets with SDIO interface.

 	  This driver is required if you want to use Marvell Bluetooth
-	  devices with SDIO interface. Currently SD8688/SD8787 chipsets are
-	  supported.
+	  devices with SDIO interface. Currently SD8688/SD8787/SD8797
+	  chipsets are supported.

 	  Say Y here to compile support for Marvell BT-over-SDIO driver
 	  into the kernel or say M to compile it as module.

+ 13 - 2
drivers/bluetooth/btmrvl_sdio.c

@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
 	.io_port_1 = 0x01,
 	.io_port_2 = 0x02,
 };
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
 	.cfg = 0x00,
 	.host_int_mask = 0x02,
 	.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 	.helper		= NULL,
 	.firmware	= "mrvl/sd8787_uapsta.bin",
-	.reg		= &btmrvl_reg_8787,
+	.reg		= &btmrvl_reg_87xx,
+	.sd_blksz_fw_dl	= 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+	.helper		= NULL,
+	.firmware	= "mrvl/sd8797_uapsta.bin",
+	.reg		= &btmrvl_reg_87xx,
 	.sd_blksz_fw_dl	= 256,
 };

@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
 	/* Marvell SD8787 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
 			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+	/* Marvell SD8797 Bluetooth device */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+			.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },

 	{ }	/* Terminating entry */
 };
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE("sd8688_helper.bin");
 MODULE_FIRMWARE("sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");

+ 1 - 2
drivers/bluetooth/btusb.c

@@ -777,9 +777,8 @@ skip_waking:
 		usb_mark_last_busy(data->udev);
 	}

-	usb_free_urb(urb);
-
 done:
+	usb_free_urb(urb);
 	return err;
 }


+ 40 - 2
drivers/firmware/iscsi_ibft.c

@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
 	ibft_cleanup();
 }

+#ifdef CONFIG_ACPI
+static const struct {
+	char *sign;
+} ibft_signs[] = {
+	/*
+	 * One spec says "IBFT", the other says "iBFT". We have to check
+	 * for both.
+	 */
+	{ ACPI_SIG_IBFT },
+	{ "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+	int i;
+	struct acpi_table_header *table = NULL;
+
+	if (acpi_disabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+		acpi_get_table(ibft_signs[i].sign, 0, &table);
+		ibft_addr = (struct acpi_table_ibft *)table;
+	}
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
 	int rc = 0;

+	/*
+	   As on UEFI systems the setup_arch()/find_ibft_region()
+	   is called before ACPI tables are parsed and it only does
+	   legacy finding.
+	*/
+	if (!ibft_addr)
+		acpi_find_ibft_region();
+
 	if (ibft_addr) {
-		printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-		       (u64)isa_virt_to_bus(ibft_addr));
+		pr_info("iBFT detected.\n");

 		rc = ibft_check_device();
 		if (rc)

+ 2 - 24
drivers/firmware/iscsi_ibft_find.c

@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
 	char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-	/*
-	 * One spec says "IBFT", the other says "iBFT". We have to check
-	 * for both.
-	 */
-	{ ACPI_SIG_IBFT },
-#endif
 	{ "iBFT" },
 	{ "iBFT" },
 	{ "BIFT" },	/* Broadcom iSCSI Offload */
 	{ "BIFT" },	/* Broadcom iSCSI Offload */
 };
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */

-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-	ibft_addr = (struct acpi_table_ibft *)header;
-	return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
 	unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
 				 * the table cannot be valid. */
 				if (pos + len <= (IBFT_END-1)) {
 					ibft_addr = (struct acpi_table_ibft *)virt;
+					pr_info("iBFT found at 0x%lx.\n", pos);
 					goto done;
 				}
 			}
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-	int i;
-#endif
 	ibft_addr = NULL;

-#ifdef CONFIG_ACPI
-	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-		acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
 	/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
 	 * only use ACPI for this */

-	if (!ibft_addr && !efi_enabled)
+	if (!efi_enabled)
 		find_ibft_in_mem();

 	if (ibft_addr) {

+ 8 - 13
drivers/gpio/gpio-da9052.c

@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>

 #define DA9052_INPUT				1
 #define DA9052_OUTPUT_OPENDRAIN		2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE		0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE		0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT		4
+#define DA9052_IRQ_GPI0			16
+#define DA9052_GPIO_ODD_SHIFT			7
+#define DA9052_GPIO_EVEN_SHIFT			3

 struct da9052_gpio {
 	struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
 	struct da9052_gpio *gpio = to_da9052_gpio(gc);
-	unsigned char register_value = 0;
 	int ret;

 	if (da9052_gpio_port_odd(offset)) {
-		if (value) {
-			register_value = DA9052_GPIO_ODD_PORT_MODE;
 			ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 						DA9052_GPIO_0_1_REG,
 						DA9052_GPIO_ODD_PORT_MODE,
-						register_value);
+						value << DA9052_GPIO_ODD_SHIFT);
 			if (ret != 0)
 				dev_err(gpio->da9052->dev,
 					"Failed to updated gpio odd reg,%d",
 					ret);
-		}
 	} else {
-		if (value) {
-			register_value = DA9052_GPIO_EVEN_PORT_MODE;
 			ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 						DA9052_GPIO_0_1_REG,
 						DA9052_GPIO_EVEN_PORT_MODE,
-						register_value);
+						value << DA9052_GPIO_EVEN_SHIFT);
 			if (ret != 0)
 				dev_err(gpio->da9052->dev,
 					"Failed to updated gpio even reg,%d",
 					ret);
-		}
 	}
 }

@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
 	.direction_input = da9052_gpio_direction_input,
 	.direction_output = da9052_gpio_direction_output,
 	.to_irq = da9052_gpio_to_irq,
-	.can_sleep = 1;
-	.ngpio = 16;
-	.base = -1;
+	.can_sleep = 1,
+	.ngpio = 16,
+	.base = -1,
 };

 static int __devinit da9052_gpio_probe(struct platform_device *pdev)

+ 31 - 1
drivers/gpio/gpio-ml-ioh.c

@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
 		  &chip->reg->regs[chip->ch].imask);
 }

+static void ioh_irq_disable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien &= ~(1 << (d->irq - chip->irq_base));
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien |= 1 << (d->irq - chip->irq_base);
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
 	struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 	int i, j;
 	int ret = IRQ_NONE;

-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++, chip++) {
 		reg_val = ioread32(&chip->reg->regs[i].istatus);
 		for (j = 0; j < num_ports[i]; j++) {
 			if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
 	ct->chip.irq_mask = ioh_irq_mask;
 	ct->chip.irq_unmask = ioh_irq_unmask;
 	ct->chip.irq_set_type = ioh_irq_type;
+	ct->chip.irq_disable = ioh_irq_disable;
+	ct->chip.irq_enable = ioh_irq_enable;

 	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
 			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);

+ 13 - 5
drivers/gpio/gpio-mpc8xxx.c

@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
 	return 0;
 }

+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 28..31 are input only on MPC5121 */
+	if (gpio >= 28)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = mpc8xxx_gpio_dir_out;
-	if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-		gc->get = mpc8572_gpio_get;
-	else
-		gc->get = mpc8xxx_gpio_get;
+	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+		mpc8572_gpio_get : mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->to_irq = mpc8xxx_gpio_to_irq;


+ 0 - 4
drivers/gpio/gpio-pl061.c

@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
 	int ret, irq, i;
 	static DECLARE_BITMAP(init_irq, NR_IRQS);

-	pdata = dev->dev.platform_data;
-	if (pdata == NULL)
-		return -ENODEV;
-
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (chip == NULL)
 		return -ENOMEM;

+ 1 - 0
drivers/gpu/drm/i915/i915_debugfs.c

@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	const struct intel_device_info *info = INTEL_INFO(dev);

 	seq_printf(m, "gen: %d\n", info->gen);
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
 	B(is_mobile);
 	B(is_i85x);

+ 10 - 0
drivers/gpu/drm/i915/i915_dma.c

@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)

 	diff1 = now - dev_priv->last_time1;

+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->chipset_power;
+
 	count1 = I915_READ(DMIEC);
 	count2 = I915_READ(DDREC);
 	count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	dev_priv->last_count1 = total_count;
 	dev_priv->last_time1 = now;

+	dev_priv->chipset_power = ret;
+
 	return ret;
 }


+ 33 - 10
drivers/gpu/drm/i915/i915_drv.c

@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
 		"Enable powersavings, fbc, downclocking, etc. (default: true)");

-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: false)");
+		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6 (default: true)");
+		"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");

 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
 	}
 }

-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
 	int count;

@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 		udelay(10);
 }

+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+	POSTING_READ(FORCEWAKE_MT);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+		udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)

 	/* Forcewake is atomic in case we get in here without the lock */
 	if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-		__gen6_gt_force_wake_get(dev_priv);
+		dev_priv->display.force_wake_get(dev_priv);
 }

-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
 	POSTING_READ(FORCEWAKE);
 }

+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+	POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

 	if (atomic_dec_and_test(&dev_priv->forcewake_count))
-		__gen6_gt_force_wake_put(dev_priv);
+		dev_priv->display.force_wake_put(dev_priv);
 }

 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
-	((reg) < 0x40000) && \
-	((reg) != FORCEWAKE))
+	 ((reg) < 0x40000) &&		 \
+	 ((reg) != FORCEWAKE) &&	 \
+	 ((reg) != ECOBUS))

 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \

+ 14 - 4
drivers/gpu/drm/i915/i915_drv.h

@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;

 struct intel_opregion {
 	struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*force_wake_get)(struct drm_i915_private *dev_priv);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {

 	u64 last_count1;
 	unsigned long last_time1;
+	unsigned long chipset_power;
 	u64 last_count2;
 	struct timespec last_time2;
 	unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;

@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);

+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
 	(((dev_priv)->info->gen >= 6) && \
 	(((dev_priv)->info->gen >= 6) && \
-	((reg) < 0x40000) && \
-	((reg) != FORCEWAKE))
+	 ((reg) < 0x40000) &&		 \
+	 ((reg) != FORCEWAKE) &&	 \
+	 ((reg) != ECOBUS))
 
 
 #define __i915_read(x, y) \
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

+ 1 - 6
drivers/gpu/drm/i915/i915_gem.c

@@ -2026,13 +2026,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
 	 * to handle this, the waiter on a request often wants an associated
 	 * buffer to have made it to the inactive list, and we would need
 	 * a separate wait queue to handle that.
-	 *
-	 * To avoid a recursion with the ilk VT-d workaround (that calls
-	 * gpu_idle when unbinding objects with interruptible==false) don't
-	 * retire requests in that case (because it might call unbind if the
-	 * active list holds the last reference to the object).
 	 */
-	if (ret == 0 && dev_priv->mm.interruptible)
+	if (ret == 0)
 		i915_gem_retire_requests_ring(ring);
 
 	return ret;

+ 18 - 1
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
 	uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+	/* Enable semaphores on SNB when IO remapping is off */
+	if (INTEL_INFO(dev)->gen == 6)
+		return !intel_iommu_enabled;
+
+	return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+	if (!intel_enable_semaphores(obj->base.dev))
 		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);

+ 26 - 4
drivers/gpu/drm/i915/i915_reg.h

@@ -3303,10 +3303,10 @@
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)	((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
@@ -3447,8 +3447,30 @@
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB		(0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB		(0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB		(0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB		(0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB		(0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB		(0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB		(0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
+
 #define  FORCEWAKE				0xA18C
 #define  FORCEWAKE_ACK				0x130090
+#define  FORCEWAKE_MT				0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK			0x130040
+#define  ECOBUS					0xa180
+#define    FORCEWAKE_MT_ENABLE			(1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES			0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20

+ 79 - 10
drivers/gpu/drm/i915/intel_display.c

@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-					 unsigned int *pipe_bpp)
+					 unsigned int *pipe_bpp,
+					 struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		}
 	}
 
+	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+		display_bpc = 6;
+	}
+
 	/*
 	 * We could just drive the pipe at the highest bpc all the time and
 	 * enable dithering as needed, but that costs bandwidth.  So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
+	/* default to 8bpc */
+	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+	if (is_dp) {
+		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+			pipeconf |= PIPECONF_BPP_6 |
+				    PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+		}
+	}
+
 	dpll |= DPLL_VCO_ENABLE;
 
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	/* determine panel color depth */
 	temp = I915_READ(PIPECONF(pipe));
 	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
 	switch (pipe_bpp) {
 	case 18:
 		temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
+	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	if (ret)
+		goto free_work;
+
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
+		drm_vblank_put(dev, intel_crtc->pipe);
 
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	crtc->fb = fb;
 
-	ret = drm_vblank_get(dev, intel_crtc->pipe);
-	if (ret)
-		goto cleanup_objs;
-
 	work->pending_flip_obj = obj;
 
 	work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
 	intel_crtc->unpin_work = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
 	kfree(work);
 
 	return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+	/*
+	 * Respect the kernel parameter if it is set
+	 */
+	if (i915_enable_rc6 >= 0)
+		return i915_enable_rc6;
+
+	/*
+	 * Disable RC6 on Ironlake
+	 */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
+
+	/*
+	 * Enable rc6 on Sandybridge if DMA remapping is disabled
+	 */
+	if (INTEL_INFO(dev)->gen == 6) {
+		DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+				 intel_iommu_enabled ? "true" : "false",
+				 !intel_iommu_enabled ? "en" : "dis");
+		return !intel_iommu_enabled;
+	}
+	DRM_DEBUG_DRIVER("RC6 enabled\n");
+	return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-	if (i915_enable_rc6)
+	if (intel_enable_rc6(dev_priv->dev))
 		rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
 			GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	/* rc6 disabled by default due to repeated reports of hanging during
 	 * boot and resume.
 	 */
-	if (!i915_enable_rc6)
+	if (!intel_enable_rc6(dev))
 		return;
 
 	mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
 
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+		/* IVB configs may use multi-threaded forcewake */
+		if (IS_IVYBRIDGE(dev)) {
+			u32	ecobus;
+
+			mutex_lock(&dev->struct_mutex);
+			__gen6_gt_force_wake_mt_get(dev_priv);
+			ecobus = I915_READ(ECOBUS);
+			__gen6_gt_force_wake_mt_put(dev_priv);
+			mutex_unlock(&dev->struct_mutex);
+
+			if (ecobus & FORCEWAKE_MT_ENABLE) {
+				DRM_DEBUG_KMS("Using MT version of forcewake\n");
+				dev_priv->display.force_wake_get =
+					__gen6_gt_force_wake_mt_get;
+				dev_priv->display.force_wake_put =
+					__gen6_gt_force_wake_mt_put;
+			}
+		}
+
 		if (HAS_PCH_IBX(dev))
 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
 		else if (HAS_PCH_CPT(dev))

+ 133 - 40
drivers/gpu/drm/i915/intel_dp.c

@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
 */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int bpp = 24;
 
-	if (intel_crtc)
+	if (check_bpp)
+		bpp = check_bpp;
+	else if (intel_crtc)
 		bpp = intel_crtc->bpp;
 
 	return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
 	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
 			return MODE_PANEL;
 	}
 
-	if (intel_dp_link_required(intel_dp, mode->clock)
-	    > intel_dp_max_data_rate(max_link_clock, max_lanes))
-		return MODE_CLOCK_HIGH;
+	mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+			mode_rate = intel_dp_link_required(intel_dp,
+							   mode->clock, 18);
+			if (mode_rate > max_rate)
+				return MODE_CLOCK_HIGH;
+			else
+				mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+	}
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_GEN6(dev))
-			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+		if (IS_GEN6(dev) || IS_GEN7(dev))
+			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
 		else
 			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-			if (intel_dp_link_required(intel_dp, mode->clock)
+			if (intel_dp_link_required(intel_dp, mode->clock, bpp)
 					<= link_avail) {
 				intel_dp->link_bw = bws[clock];
 				intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	}
 
 	/*
-	 * There are three kinds of DP registers:
+	 * There are four kinds of DP registers:
 	 *
 	 * 	IBX PCH
-	 * 	CPU
+	 * 	SNB CPU
+	 *	IVB CPU
 	 * 	CPT PCH
 	 *
 	 * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		intel_dp->DP |= intel_crtc->pipe << 29;
+
+		/* don't miss out required setting for eDP */
+		intel_dp->DP |= DP_PLL_ENABLE;
+		if (adjusted_mode->clock < 200000)
+			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+		else
+			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
 		intel_dp->DP |= intel_dp->color_range;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char	*link_train_names[] = {
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */
-#define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-	case DP_TRAIN_VOLTAGE_SWING_400:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_600:
-		return DP_TRAIN_PRE_EMPHASIS_6;
-	case DP_TRAIN_VOLTAGE_SWING_800:
-		return DP_TRAIN_PRE_EMPHASIS_3_5;
-	case DP_TRAIN_VOLTAGE_SWING_1200:
-	default:
-		return DP_TRAIN_PRE_EMPHASIS_0;
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_800;
+	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else
+		return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
 	}
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
 	uint8_t	*adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-	int voltage_max;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
 		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
 			p = this_p;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-	else
-		voltage_max = I830_DP_VOLTAGE_MAX;
+	voltage_max = intel_dp_voltage_max(intel_dp);
 	if (v >= voltage_max)
 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-	if (p >= intel_dp_pre_emphasis_max(v))
-		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
 	for (lane = 0; lane < 4; lane++)
 		intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
 	}
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_600MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_500MV_0DB_IVB;
+	}
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 				  DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 	else
 		DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
 		uint32_t    signal_levels;
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
 		else
 			reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
 	else
 		reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 		udelay(100);
 	}
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
 	} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	msleep(17);
 
 	if (is_edp(intel_dp)) {
-		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
			DP |= DP_LINK_TRAIN_OFF_CPT;
 		else
 			DP |= DP_LINK_TRAIN_OFF;

+ 1 - 0
drivers/gpu/drm/i915/intel_drv.h

@@ -110,6 +110,7 @@
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,

+ 8 - 0
drivers/gpu/drm/i915/intel_lvds.c

@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus AT5NM10T-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };

+ 5 - 11
drivers/gpu/drm/i915/intel_panel.c

@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev)) {
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
 		max >>= 16;
 	} else {
 	} else {
-		if (IS_PINEVIEW(dev)) {
+		if (INTEL_INFO(dev)->gen < 4)
 			max >>= 17;
 			max >>= 17;
-		} else {
+		else
 			max >>= 16;
 			max >>= 16;
-			if (INTEL_INFO(dev)->gen < 4)
-				max &= ~1;
-		}
 
 
 		if (is_backlight_combination_mode(dev))
 		if (is_backlight_combination_mode(dev))
 			max *= 0xff;
 			max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 	} else {
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-		if (IS_PINEVIEW(dev))
+		if (INTEL_INFO(dev)->gen < 4)
 			val >>= 1;
 			val >>= 1;
 
 
 		if (is_backlight_combination_mode(dev)) {
 		if (is_backlight_combination_mode(dev)) {
 			u8 lbpc;
 			u8 lbpc;
 
 
-			val &= ~1;
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
 			val *= lbpc;
 			val *= lbpc;
 		}
 		}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
 	}
 	}
 
 
 	tmp = I915_READ(BLC_PWM_CTL);
 	tmp = I915_READ(BLC_PWM_CTL);
-	if (IS_PINEVIEW(dev)) {
-		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+	if (INTEL_INFO(dev)->gen < 4) 
 		level <<= 1;
 		level <<= 1;
-	} else
-		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 }
 
 

+ 26 - 10
drivers/gpu/drm/i915/intel_sdvo.c

@@ -50,6 +50,7 @@
 #define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 		}
 		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 	}
-	if (intel_crtc->pipe == 1)
-		sdvox |= SDVO_PIPE_B_SELECT;
+
+	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+		sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+	else
+		sdvox |= TRANSCODER(intel_crtc->pipe);
+
 	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 	return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+				  struct edid *edid)
+{
+	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+	bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+		      connector_is_digital, monitor_is_digital);
+	return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 		if (edid == NULL)
 			edid = intel_sdvo_get_analog_edid(connector);
 		if (edid != NULL) {
-			if (edid->input & DRM_EDID_INPUT_DIGITAL)
-				ret = connector_status_disconnected;
-			else
+			if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+							      edid))
 				ret = connector_status_connected;
+			else
+				ret = connector_status_disconnected;
+
 			connector->display_info.raw_edid = NULL;
 			kfree(edid);
 		} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 		edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-		if (connector_is_digital == monitor_is_digital) {
+		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+						      edid)) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}

+ 2 - 2
drivers/hwmon/jz4740-hwmon.c

@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
 	struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
 	struct completion *completion = &hwmon->read_completion;
-	unsigned long t;
+	long t;
 	unsigned long val;
 	int ret;
 
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
 	return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
 	.probe	= jz4740_hwmon_probe,
 	.remove = __devexit_p(jz4740_hwmon_remove),
 	.driver = {

+ 5 - 0
drivers/iommu/intel-iommu.c

@@ -405,6 +405,9 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
 
 	bus_register_notifier(&pci_bus_type, &device_nb);
 
+	intel_iommu_enabled = 1;
+
 	return 0;
 }
 

+ 8 - 0
drivers/mmc/card/block.c

@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
 		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_BLK_NO_CMD23),
+
+	/*
+	 * Some Micron MMC cards needs longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
 	END_FIXUP
 };
 

+ 64 - 34
drivers/mmc/core/core.c

@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 			data->timeout_clks = 0;
 		}
 	}
+
+	/*
+	 * Some cards require longer data read timeout than indicated in CSD.
+	 * Address this by setting the read timeout to a "reasonably high"
+	 * value. For the cards tested, 300ms has proven enough. If necessary,
+	 * this value can be increased if other problematic cards require this.
+	 */
+	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+		data->timeout_ns = 300000000;
+		data->timeout_clks = 0;
+	}
+
 	/*
 	 * Some cards need very high timeouts if driven in SPI mode.
 	 * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 	mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+	struct mmc_card *card;
+	unsigned int timeout;
+	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+	int err = 0;
+
+	card = host->card;
+
+	/*
+	 * Send power notify command only if card
+	 * is mmc and notify state is powered ON
+	 */
+	if (card && mmc_card_mmc(card) &&
+	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+			notify_type = EXT_CSD_POWER_OFF_SHORT;
+			timeout = card->ext_csd.generic_cmd6_time;
+			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+		} else {
+			notify_type = EXT_CSD_POWER_OFF_LONG;
+			timeout = card->ext_csd.power_off_longtime;
+			card->poweroff_notify_state = MMC_POWEROFF_LONG;
+		}
+
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_POWER_OFF_NOTIFICATION,
+				 notify_type, timeout);
+
+		if (err && err != -EBADMSG)
+			pr_err("Device failed to respond within %d poweroff "
+			       "time. Forcefully powering down the device\n",
+			       timeout);
+
+		/* Set the card state to no notification after the poweroff */
+		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+	}
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
-	struct mmc_card *card;
-	unsigned int notify_type;
-	unsigned int timeout;
-	int err;
-
 	mmc_host_clk_hold(host);
 
-	card = host->card;
 	host->ios.clock = 0;
 	host->ios.vdd = 0;
 
-	if (card && mmc_card_mmc(card) &&
-	    (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-			notify_type = EXT_CSD_POWER_OFF_SHORT;
-			timeout = card->ext_csd.generic_cmd6_time;
-			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-		} else {
-			notify_type = EXT_CSD_POWER_OFF_LONG;
-			timeout = card->ext_csd.power_off_longtime;
-			card->poweroff_notify_state = MMC_POWEROFF_LONG;
-		}
-
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_POWER_OFF_NOTIFICATION,
-				 notify_type, timeout);
-
-		if (err && err != -EBADMSG)
-			pr_err("Device failed to respond within %d poweroff "
-			       "time. Forcefully powering down the device\n",
-			       timeout);
-
-		/* Set the card state to no notification after the poweroff */
-		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-	}
+	mmc_poweroff_notify(host);
 
 	/*
 	 * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
 
 	mmc_bus_get(host);
 
-	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
 		err = host->bus_ops->sleep(host);
 
 	mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
 		 * pre-claim the host.
 		 */
 		if (mmc_try_claim_host(host)) {
-			if (host->bus_ops->suspend)
+			if (host->bus_ops->suspend) {
+				/*
+				 * For eMMC 4.5 device send notify command
+				 * before sleep, because in sleep state eMMC 4.5
+				 * devices respond to only RESET and AWAKE cmd
+				 */
+				mmc_poweroff_notify(host);
 				err = host->bus_ops->suspend(host);
+			}
+			mmc_do_release_host(host);
+
 			if (err == -ENOSYS || !host->bus_ops->resume) {
 				/*
 				 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
 				host->pm_flags = 0;
 				err = 0;
 			}
-			mmc_do_release_host(host);
 		} else {
 			err = -EBUSY;
 		}

+ 8 - 4
drivers/mmc/core/mmc.c

@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	 * set the notification byte in the ext_csd register of device
 	 */
 	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-	    (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+	    (card->ext_csd.rev >= 6)) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_POWER_OFF_NOTIFICATION,
 				 EXT_CSD_POWER_ON,
 				 card->ext_csd.generic_cmd6_time);
 		if (err && err != -EBADMSG)
 			goto free_card;
-	}
 
-	if (!err)
-		card->poweroff_notify_state = MMC_POWERED_ON;
+		/*
+		 * The err can be -EBADMSG or 0,
+		 * so check for success and update the flag
+		 */
+		if (!err)
+			card->poweroff_notify_state = MMC_POWERED_ON;
+	}
 
 	/*
 	 * Activate high speed (if supported)

+ 1 - 0
drivers/mmc/host/mxcmmc.c

@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 				"failed to config DMA channel. Falling back to PIO\n");
 			dma_release_channel(host->dma);
 			host->do_dma = 0;
+			host->dma = NULL;
 		}
 	}
 

+ 5 - 2
drivers/mmc/host/omap_hsmmc.c

@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
 			host->data->sg_len,
 			omap_hsmmc_get_dma_dir(host, host->data));
 		omap_free_dma(dma_ch);
+		host->data->host_cookie = 0;
 	}
 	host->data = NULL;
 }
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct mmc_data *data = mrq->data;
 
 	if (host->use_dma) {
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			     omap_hsmmc_get_dma_dir(host, data));
+		if (data->host_cookie)
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				     data->sg_len,
+				     omap_hsmmc_get_dma_dir(host, data));
 		data->host_cookie = 0;
 	}
 }

+ 1 - 0
drivers/mmc/host/sdhci-cns3xxx.c

@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 

+ 0 - 2
drivers/mmc/host/sdhci-s3c.c

@@ -644,8 +644,6 @@ static int sdhci_s3c_resume(struct platform_device *dev)
 static struct platform_driver sdhci_s3c_driver = {
 static struct platform_driver sdhci_s3c_driver = {
 	.probe		= sdhci_s3c_probe,
 	.probe		= sdhci_s3c_probe,
 	.remove		= __devexit_p(sdhci_s3c_remove),
 	.remove		= __devexit_p(sdhci_s3c_remove),
-	.suspend	= sdhci_s3c_suspend,
-	.resume	        = sdhci_s3c_resume,
 	.driver		= {
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.owner	= THIS_MODULE,
 		.name	= "s3c-sdhci",
 		.name	= "s3c-sdhci",

+ 1 - 1
drivers/mmc/host/sh_mmcif.c

@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		if (host->power) {
 			pm_runtime_put(&host->pd->dev);
 			host->power = false;
-			if (p->down_pwr)
+			if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
 				p->down_pwr(host->pd);
 		}
 		host->state = STATE_IDLE;

+ 1 - 1
drivers/mmc/host/tmio_mmc_pio.c

@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		/* start bus clock */
 		tmio_mmc_clk_start(host);
 	} else if (ios->power_mode != MMC_POWER_UP) {
-		if (host->set_pwr)
+		if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
 			host->set_pwr(host->pdev, 0);
 		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
 		    pdata->power) {

+ 7 - 4
drivers/net/ethernet/freescale/fec.c

@@ -232,6 +232,7 @@ struct fec_enet_private {
 	struct	platform_device *pdev;
 
 	int	opened;
+	int	dev_id;
 
 	/* Phylib and MDIO interface */
 	struct	mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
 
 	/* Adjust MAC if using macaddr */
 	if (iap == macaddr)
-		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
-	int dev_id = fep->pdev->id;
+	int dev_id = fep->dev_id;
 
 	fep->phy_dev = NULL;
 
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	 * mdio interface in board design, and need to be configured by
 	 * fec0 mii_bus.
 	 */
-	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
 		/* fec1 uses fec0 mii_bus */
 		fep->mii_bus = fec0_mii_bus;
 		return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	fep->mii_bus->read = fec_enet_mdio_read;
 	fep->mii_bus->write = fec_enet_mdio_write;
 	fep->mii_bus->reset = fec_enet_mdio_reset;
-	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
 	fep->mii_bus->priv = fep;
 	fep->mii_bus->parent = &pdev->dev;
 
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
 	int i, irq, ret = 0;
 	struct resource *r;
 	const struct of_device_id *of_id;
+	static int dev_id;
 
 	of_id = of_match_device(fec_dt_ids, &pdev->dev);
 	if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
 
 	fep->hwp = ioremap(r->start, resource_size(r));
 	fep->pdev = pdev;
+	fep->dev_id = dev_id++;
 
 	if (!fep->hwp) {
 		ret = -ENOMEM;

+ 8 - 45
drivers/net/ethernet/freescale/fsl_pq_mdio.c

@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
 }
 }
 EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
 
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
-	int i;
-
-	for (i = PHY_MAX_ADDR; i > 0; i--) {
-		u32 phy_id;
-
-		if (get_phy_id(new_bus, i, &phy_id))
-			return -1;
-
-		if (phy_id == 0xffffffff)
-			break;
-	}
-
-	return i;
-}
-
 
 
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
 {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 	struct gfar __iomem *enet_regs;
 	struct gfar __iomem *enet_regs;
 
 
 	/*
 	/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
 	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
 			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
 		return of_iomap(np, 1);
 		return of_iomap(np, 1);
-	} else
-		return NULL;
-}
+	}
 #endif
 #endif
+	return NULL;
+}
 
 
 
 
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 {
 {
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 	struct device_node *np = NULL;
 	struct device_node *np = NULL;
 	int err = 0;
 	int err = 0;
 
 
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 		return err;
 		return err;
 	else
 	else
 		return -EINVAL;
 		return -EINVAL;
-}
+#else
+	return -ENODEV;
 #endif
 #endif
-
+}
 
 
 static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 {
 {
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
 			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
 			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
 			of_device_is_compatible(np, "gianfar")) {
 			of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 		tbipa = get_gfar_tbipa(regs, np);
 		tbipa = get_gfar_tbipa(regs, np);
 		if (!tbipa) {
 		if (!tbipa) {
 			err = -EINVAL;
 			err = -EINVAL;
 			goto err_free_irqs;
 			goto err_free_irqs;
 		}
 		}
-#else
-		err = -ENODEV;
-		goto err_free_irqs;
-#endif
 	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
 	} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
 			of_device_is_compatible(np, "ucc_geth_phy")) {
 			of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 		u32 id;
 		u32 id;
 		static u32 mii_mng_master;
 		static u32 mii_mng_master;
 
 
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 			mii_mng_master = id;
 			mii_mng_master = id;
 			ucc_set_qe_mux_mii_mng(id - 1);
 			ucc_set_qe_mux_mii_mng(id - 1);
 		}
 		}
-#else
-		err = -ENODEV;
-		goto err_free_irqs;
-#endif
 	} else {
 	} else {
 		err = -ENODEV;
 		err = -ENODEV;
 		goto err_free_irqs;
 		goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 	}
 	}
 
 
 	if (tbiaddr == -1) {
 	if (tbiaddr == -1) {
-		out_be32(tbipa, 0);
-
-		tbiaddr = fsl_pq_mdio_find_free(new_bus);
-	}
-
-	/*
-	 * We define TBIPA at 0 to be illegal, opting to fail for boards that
-	 * have PHYs at 1-31, rather than change tbipa and rescan.
-	 */
-	if (tbiaddr == 0) {
 		err = -EBUSY;
 		err = -EBUSY;
 
 
 		goto err_free_irqs;
 		goto err_free_irqs;

+ 1 - 3
drivers/net/ppp/pptp.c

@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
 	lock_sock(sk);
 
 	opt->src_addr = sp->sa_addr.pptp;
-	if (add_chan(po)) {
-		release_sock(sk);
+	if (add_chan(po))
 		error = -EBUSY;
-	}
 
 	release_sock(sk);
 	return error;

+ 1 - 1
drivers/net/wireless/ath/ath9k/main.c

@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
 			ath_start_ani(common);
 	}
 
-	if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+	if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
 		struct ath_hw_antcomb_conf div_ant_conf;
 		u8 lna_conf;
 

+ 1 - 1
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c

@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
 		}
 	case ERFSLEEP:{
 			if (ppsc->rfpwr_state == ERFOFF)
-				break;
+				return false;
 			for (queue_id = 0, i = 0;
 			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
 				ring = &pcipriv->dev.tx_ring[queue_id];

+ 1 - 1
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c

@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
 		break;
 	case ERFSLEEP:
 		if (ppsc->rfpwr_state == ERFOFF)
-			break;
+			return false;
 		for (queue_id = 0, i = 0;
 		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
 			ring = &pcipriv->dev.tx_ring[queue_id];

+ 1 - 1
drivers/net/wireless/rtlwifi/rtl8192de/phy.c

@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
 		break;
 	case ERFSLEEP:
 		if (ppsc->rfpwr_state == ERFOFF)
-			break;
+			return false;

 		for (queue_id = 0, i = 0;
 		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {

+ 1 - 1
drivers/net/wireless/rtlwifi/rtl8192se/phy.c

@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
 		}
 	case ERFSLEEP:
 			if (ppsc->rfpwr_state == ERFOFF)
-				break;
+				return false;

 			for (queue_id = 0, i = 0;
 			     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {

+ 5 - 22
drivers/sbus/char/bbc_i2c.c

@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
 	int ret = 0;

 	while (len > 0) {
-		int err = bbc_i2c_writeb(client, *buf, off);
-
-		if (err < 0) {
-			ret = err;
+		ret = bbc_i2c_writeb(client, *buf, off);
+		if (ret < 0)
 			break;
-		}
-
 		len--;
 		buf++;
 		off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
 	int ret = 0;

 	while (len > 0) {
-		int err = bbc_i2c_readb(client, buf, off);
-		if (err < 0) {
-			ret = err;
+		ret = bbc_i2c_readb(client, buf, off);
+		if (ret < 0)
 			break;
-		}
 		len--;
 		buf++;
 		off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
 	.remove		= __devexit_p(bbc_i2c_remove),
 };

-static int __init bbc_i2c_init(void)
-{
-	return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-	platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);

 MODULE_LICENSE("GPL");

+ 1 - 12
drivers/sbus/char/display7seg.c

@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
 	.remove		= __devexit_p(d7s_remove),
 };

-static int __init d7s_init(void)
-{
-	return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-	platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);

+ 1 - 11
drivers/sbus/char/envctrl.c

@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
 	.remove		= __devexit_p(envctrl_remove),
 };

-static int __init envctrl_init(void)
-{
-	return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
-	platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);

-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");

+ 1 - 11
drivers/sbus/char/flash.c

@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
 	.remove		= __devexit_p(flash_remove),
 };

-static int __init flash_init(void)
-{
-	return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
-	platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);

-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");

+ 1 - 11
drivers/sbus/char/uctrl.c

@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
 };


-static int __init uctrl_init(void)
-{
-	return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
-	platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);

-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");

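Note: the sbus char driver hunks above (bbc_i2c, display7seg, envctrl, flash, uctrl) all replace the same hand-written registration boilerplate with the module_platform_driver() helper. As a rough sketch (not part of this diff; the generated symbol names are an implementation detail of the macro), the one-liner expands to code equivalent to what is being deleted:

	/* Approximate expansion of module_platform_driver(uctrl_driver); */
	static int __init uctrl_driver_init(void)
	{
		return platform_driver_register(&uctrl_driver);
	}
	module_init(uctrl_driver_init);

	static void __exit uctrl_driver_exit(void)
	{
		platform_driver_unregister(&uctrl_driver);
	}
	module_exit(uctrl_driver_exit);
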
+ 6 - 2
drivers/ssb/driver_pcicore.c

@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)

 static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-	ssb_pcicore_fix_sprom_core_index(pc);
+	struct ssb_device *pdev = pc->dev;
+	struct ssb_bus *bus = pdev->bus;
+
+	if (bus->bustype == SSB_BUSTYPE_PCI)
+		ssb_pcicore_fix_sprom_core_index(pc);

 	/* Disable PCI interrupts. */
-	ssb_write32(pc->dev, SSB_INTVEC, 0);
+	ssb_write32(pdev, SSB_INTVEC, 0);

 	/* Additional PCIe always once-executed workarounds */
 	if (pc->dev->id.coreid == SSB_DEV_PCIE) {

+ 1 - 0
drivers/staging/rtl8712/usb_intf.c

@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
 	{USB_DEVICE(0x0DF6, 0x0045)},
 	{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
 	{USB_DEVICE(0x0DF6, 0x004B)},
+	{USB_DEVICE(0x0DF6, 0x005D)},
 	{USB_DEVICE(0x0DF6, 0x0063)},
 	/* Sweex */
 	{USB_DEVICE(0x177F, 0x0154)},

+ 12 - 3
drivers/staging/tidspbridge/core/dsp-clock.c

@@ -54,6 +54,7 @@

 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS		4

 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+	int i;
+
 	dsp_clock_disable_all(dsp_clocks);

+	for (i = 0; i < DM_TIMER_CLOCKS; i++)
+		omap_dm_timer_free(timer[i]);
+
 	clk_put(iva2_clk);
 	clk_put(ssi.sst_fck);
 	clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
 	static struct platform_device dspbridge_device;
+	int i, id;

 	dspbridge_device.dev.bus = &platform_bus_type;

+	for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+		timer[i] = omap_dm_timer_request_specific(id);
+
 	iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
 	if (IS_ERR(iva2_clk))
 		dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
 		clk_enable(iva2_clk);
 		break;
 	case GPT_CLK:
-		timer[clk_id - 1] =
-				omap_dm_timer_request_specific(DMT_ID(clk_id));
+		status = omap_dm_timer_start(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
 		clk_disable(iva2_clk);
 		break;
 	case GPT_CLK:
-		omap_dm_timer_free(timer[clk_id - 1]);
+		status = omap_dm_timer_stop(timer[clk_id - 1]);
 		break;
 #ifdef CONFIG_OMAP_MCBSP
 	case MCBSP_CLK:

+ 0 - 4
drivers/staging/tidspbridge/rmgr/drv_interface.c

@@ -24,11 +24,7 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>

+ 10 - 0
drivers/usb/class/cdc-acm.c

@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
 	},
 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
 	},
+	/* Motorola H24 HSPA module: */
+	{ USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+	{ USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+	{ USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+	{ USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+	{ USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+	{ USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
 	{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
 	.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
 					   data interface instead of

+ 1 - 0
drivers/usb/gadget/f_mass_storage.c

@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 	fsg_common_put(common);
 	usb_free_descriptors(fsg->function.descriptors);
 	usb_free_descriptors(fsg->function.hs_descriptors);
+	usb_free_descriptors(fsg->function.ss_descriptors);
 	kfree(fsg);
 }


+ 1 - 1
drivers/usb/renesas_usbhs/mod.c

@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
 		if (mod->irq_attch)
 			intenb1 |= ATTCHE;

-		if (mod->irq_attch)
+		if (mod->irq_dtch)
 			intenb1 |= DTCHE;

 		if (mod->irq_sign)

+ 1 - 0
drivers/usb/renesas_usbhs/mod_host.c

@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
 		dev_err(dev, "Failed to create hcd\n");
 		return -ENOMEM;
 	}
+	hcd->has_tt = 1; /* for low/full speed */

 	pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
 	if (!pipe_info) {

+ 6 - 1
drivers/usb/serial/option.c

@@ -663,7 +663,12 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },

+ 2 - 2
drivers/xen/swiotlb-xen.c

@@ -166,7 +166,7 @@ retry:
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem(bytes);
+	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
 	if (!xen_io_tlb_start) {
 		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
 		goto error;
@@ -179,7 +179,7 @@ retry:
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), bytes);
+		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
 		m = "Failed to get contiguous memory for DMA from Xen!\n"\
 		    "You either: don't have the permissions, do not have"\
 		    " enough free memory under 4GB, or the hypervisor memory"\

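Note: both hunks above size the bootmem allocation and its error-path free with PAGE_ALIGN(bytes), keeping the two calls symmetric. A minimal sketch of the pattern (variable names assumed, not taken from this diff):

	/* Allocate and, if the later setup step fails, free the same
	 * page-aligned length, so no partial page is leaked or miscounted. */
	size_t aligned = PAGE_ALIGN(bytes);
	void *buf = alloc_bootmem_pages(aligned);
	if (setup_failed)
		free_bootmem(__pa(buf), aligned);
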
+ 55 - 62
fs/btrfs/async-thread.c

@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
 	int idle;
 };

+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
 	struct worker_start *start;
 	start = container_of(work, struct worker_start, work);
-	btrfs_start_workers(start->queue, 1);
+	__btrfs_start_workers(start->queue);
 	kfree(start);
 }

-static int start_new_worker(struct btrfs_workers *queue)
-{
-	struct worker_start *start;
-	int ret;
-
-	start = kzalloc(sizeof(*start), GFP_NOFS);
-	if (!start)
-		return -ENOMEM;
-
-	start->work.func = start_new_worker_func;
-	start->queue = queue;
-	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-	if (ret)
-		kfree(start);
-	return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
 	struct btrfs_workers *workers = worker->workers;
+	struct worker_start *start;
 	unsigned long flags;

 	rmb();
 	if (!workers->atomic_start_pending)
 		return;

+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return;
+
+	start->work.func = start_new_worker_func;
+	start->queue = workers;
+
 	spin_lock_irqsave(&workers->lock, flags);
 	if (!workers->atomic_start_pending)
 		goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)

 	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	start_new_worker(workers);
+	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
 	return;

 out:
+	kfree(start);
 	spin_unlock_irqrestore(&workers->lock, flags);
 }

@@ -331,7 +325,7 @@ again:
 			run_ordered_completions(worker->workers, work);

 			check_pending_worker_creates(worker);
-
+			cond_resched();
 		}

 		spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-				 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
-	int i;

-	for (i = 0; i < num_workers; i++) {
-		worker = kzalloc(sizeof(*worker), GFP_NOFS);
-		if (!worker) {
-			ret = -ENOMEM;
-			goto fail;
-		}
+	worker = kzalloc(sizeof(*worker), GFP_NOFS);
+	if (!worker) {
+		ret = -ENOMEM;
+		goto fail;
+	}

-		INIT_LIST_HEAD(&worker->pending);
-		INIT_LIST_HEAD(&worker->prio_pending);
-		INIT_LIST_HEAD(&worker->worker_list);
-		spin_lock_init(&worker->lock);
-
-		atomic_set(&worker->num_pending, 0);
-		atomic_set(&worker->refs, 1);
-		worker->workers = workers;
-		worker->task = kthread_run(worker_loop, worker,
-					   "btrfs-%s-%d", workers->name,
-					   workers->num_workers + i);
-		if (IS_ERR(worker->task)) {
-			ret = PTR_ERR(worker->task);
-			kfree(worker);
-			goto fail;
-		}
-		spin_lock_irq(&workers->lock);
-		list_add_tail(&worker->worker_list, &workers->idle_list);
-		worker->idle = 1;
-		workers->num_workers++;
-		workers->num_workers_starting--;
-		WARN_ON(workers->num_workers_starting < 0);
-		spin_unlock_irq(&workers->lock);
+	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
+	INIT_LIST_HEAD(&worker->worker_list);
+	spin_lock_init(&worker->lock);
+
+	atomic_set(&worker->num_pending, 0);
+	atomic_set(&worker->refs, 1);
+	worker->workers = workers;
+	worker->task = kthread_run(worker_loop, worker,
+				   "btrfs-%s-%d", workers->name,
+				   workers->num_workers + 1);
+	if (IS_ERR(worker->task)) {
+		ret = PTR_ERR(worker->task);
+		kfree(worker);
+		goto fail;
 	}
+	spin_lock_irq(&workers->lock);
+	list_add_tail(&worker->worker_list, &workers->idle_list);
+	worker->idle = 1;
+	workers->num_workers++;
+	workers->num_workers_starting--;
+	WARN_ON(workers->num_workers_starting < 0);
+	spin_unlock_irq(&workers->lock);
+
 	return 0;
 fail:
-	btrfs_stop_workers(workers);
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting--;
+	spin_unlock_irq(&workers->lock);
 	return ret;
 }

-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
 	spin_lock_irq(&workers->lock);
-	workers->num_workers_starting += num_workers;
+	workers->num_workers_starting++;
 	spin_unlock_irq(&workers->lock);
-	return __btrfs_start_workers(workers, num_workers);
+	return __btrfs_start_workers(workers);
 }

 /*
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
 	struct list_head *fallback;
+	int ret;

 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
 			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			__btrfs_start_workers(workers, 1);
+			ret = __btrfs_start_workers(workers);
+			if (ret)
+				goto fallback;
 			goto again;
 		}
 	}
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)

 	/* don't requeue something already on a list */
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;

 	worker = find_worker(workers);
 	if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-	return 0;
 }

+ 2 - 2
fs/btrfs/async-thread.h

@@ -109,8 +109,8 @@ struct btrfs_workers {
 	char *name;
 };

-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);

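Note: with the prototype changes above, btrfs_start_workers() now starts a single worker and can fail, and btrfs_queue_worker() no longer returns a value. A hypothetical caller (sketch only; the variable and label names are assumptions, not taken from this diff) would now do:

	/* Start one worker for a pool and propagate an allocation failure. */
	ret = btrfs_start_workers(&workers);
	if (ret)
		goto fail;
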
+ 2 - 1
fs/btrfs/ctree.h

@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);

+ 2 - 2
fs/btrfs/delayed-inode.c

@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
 	 * we're accounted for.
 	 */
-	if (!trans->bytes_reserved &&
-	    src_rsv != &root->fs_info->delalloc_block_rsv) {
+	if (!src_rsv || (!trans->bytes_reserved &&
+	    src_rsv != &root->fs_info->delalloc_block_rsv)) {
 		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
 		/*
 		 * Since we're under a transaction reserve_metadata_bytes could

Some files were not shown because too many files changed in this diff