
Merge branches 'sh/hwblk', 'sh/cpuidle' and 'sh/stable-updates'

Paul Mundt, 16 years ago
parent commit 1ecc6ab669

+ 38 - 0
arch/sh/boards/mach-se/7724/setup.c

@@ -19,6 +19,7 @@
 #include <linux/smc91x.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/usb/r8a66597.h>
 #include <video/sh_mobile_lcdc.h>
 #include <media/sh_mobile_ceu.h>
 #include <asm/io.h>
@@ -302,6 +303,34 @@ static struct platform_device sh_eth_device = {
 	.resource = sh_eth_resources,
 };
 
+static struct r8a66597_platdata sh7724_usb0_host_data = {
+};
+
+static struct resource sh7724_usb0_host_resources[] = {
+	[0] = {
+		.start	= 0xa4d80000,
+		.end	= 0xa4d800ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 65,
+		.end	= 65,
+		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+	},
+};
+
+static struct platform_device sh7724_usb0_host_device = {
+	.name		= "r8a66597_hcd",
+	.id		= 0,
+	.dev = {
+		.dma_mask		= NULL,         /*  not use dma */
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &sh7724_usb0_host_data,
+	},
+	.num_resources	= ARRAY_SIZE(sh7724_usb0_host_resources),
+	.resource	= sh7724_usb0_host_resources,
+};
+
 static struct platform_device *ms7724se_devices[] __initdata = {
 	&heartbeat_device,
 	&smc91x_eth_device,
@@ -311,6 +340,7 @@ static struct platform_device *ms7724se_devices[] __initdata = {
 	&ceu1_device,
 	&keysc_device,
 	&sh_eth_device,
+	&sh7724_usb0_host_device,
 };
 
 #define EEPROM_OP   0xBA206000
@@ -364,6 +394,7 @@ static void __init sh_eth_init(void)
 #define SW4140    0xBA201000
 #define FPGA_OUT  0xBA200400
 #define PORT_HIZA 0xA4050158
+#define PORT_MSELCRB 0xA4050182
 
 #define SW41_A    0x0100
 #define SW41_B    0x0200
@@ -373,6 +404,7 @@ static void __init sh_eth_init(void)
 #define SW41_F    0x2000
 #define SW41_G    0x4000
 #define SW41_H    0x8000
+
 static int __init devices_setup(void)
 {
 	u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
@@ -385,6 +417,12 @@ static int __init devices_setup(void)
 		    (1 << 14)), /* RMII */
 		  FPGA_OUT);
 
+	/* turn on USB clocks, use external clock */
+	ctrl_outw((ctrl_inw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
+
+	/* enable USB0 port */
+	ctrl_outw(0x0600, 0xa40501d4);
+
 	/* enable IRQ 0,1,2 */
 	gpio_request(GPIO_FN_INTC_IRQ0, NULL);
 	gpio_request(GPIO_FN_INTC_IRQ1, NULL);

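For reference, the PORT_MSELCRB update in devices_setup() above is a read-modify-write: bits 15:14 are cleared and bit 15 is set, selecting the external clock for the on-chip USB before the port is enabled at 0xa40501d4. The same operation unrolled, as a sketch (the helper name is illustrative, not part of the patch):

	/* Sketch: what the one-liner above does, step by step.
	 * ctrl_inw()/ctrl_outw() are the SH 16-bit MMIO accessors.
	 */
	static void sh7724_usb0_select_external_clock(void)
	{
		u16 v = ctrl_inw(PORT_MSELCRB);	/* 0xa4050182 */

		v &= ~0xc000;	/* clear bits 15:14 (clock select field) */
		v |= 0x8000;	/* set bit 15: use the external clock */
		ctrl_outw(v, PORT_MSELCRB);
	}
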
+ 61 - 0
arch/sh/include/asm/hwblk.h

@@ -0,0 +1,61 @@
+#ifndef __ASM_SH_HWBLK_H
+#define __ASM_SH_HWBLK_H
+
+#include <asm/clock.h>
+#include <asm/io.h>
+
+#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */
+
+#define HWBLK_AREA(_flags, _parent)		\
+{						\
+	.flags = _flags,			\
+	.parent = _parent,			\
+}
+
+struct hwblk_area {
+	unsigned long cnt;
+	unsigned char parent;
+	unsigned char flags;
+};
+
+#define HWBLK(_mstp, _bit, _area)		\
+{						\
+	.mstp = (void __iomem *)_mstp,		\
+	.bit = _bit,				\
+	.area = _area,				\
+}
+
+struct hwblk {
+	void __iomem *mstp;
+	unsigned char bit;
+	unsigned char area;
+	unsigned long cnt;
+};
+
+struct hwblk_info {
+	struct hwblk_area *areas;
+	int nr_areas;
+	struct hwblk *hwblks;
+	int nr_hwblks;
+};
+
+/* Should be defined by processor-specific code */
+int arch_hwblk_init(void);
+int arch_hwblk_sleep_mode(void);
+
+int hwblk_register(struct hwblk_info *info);
+int hwblk_init(void);
+
+/* allow clocks to enable and disable hardware blocks */
+#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags)	\
+{							\
+	.name		= _name,			\
+	.id		= _id,				\
+	.parent		= _parent,			\
+	.arch_flags	= _hwblk,			\
+	.flags		= _flags,			\
+}
+
+int sh_hwblk_clk_register(struct clk *clks, int nr);
+
+#endif /* __ASM_SH_HWBLK_H */
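
The initializer macros above are meant for processor-specific tables that arch_hwblk_init() registers; the real SH7722 table appears later in this commit (hwblk-sh7722.c). A minimal sketch of the intended usage, with made-up area/bit assignments:

	/* Sketch only: the MSTPCR address, bit and area numbers here are
	 * placeholders; see hwblk-sh7722.c below for the real table.
	 */
	enum { DEMO_AREA, DEMO_AREA_NR };
	enum { DEMO_HWBLK_TMU = 1, DEMO_HWBLK_NR };

	static struct hwblk_area demo_areas[DEMO_AREA_NR] = {
		[DEMO_AREA] = HWBLK_AREA(0, 0),
	};

	static struct hwblk demo_hwblks[DEMO_HWBLK_NR] = {
		[DEMO_HWBLK_TMU] = HWBLK(0xa4150030, 15, DEMO_AREA),
	};

	static struct hwblk_info demo_info = {
		.areas = demo_areas,
		.nr_areas = ARRAY_SIZE(demo_areas),
		.hwblks = demo_hwblks,
		.nr_hwblks = ARRAY_SIZE(demo_hwblks),
	};

	int __init arch_hwblk_init(void)
	{
		return hwblk_register(&demo_info);
	}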

+ 9 - 0
arch/sh/include/asm/suspend.h

@@ -10,6 +10,15 @@ struct swsusp_arch_regs {
 	struct pt_regs user_regs;
 	unsigned long bank1_regs[8];
 };
+
+void sh_mobile_call_standby(unsigned long mode);
+
+#ifdef CONFIG_CPU_IDLE
+void sh_mobile_setup_cpuidle(void);
+#else
+static inline void sh_mobile_setup_cpuidle(void) {}
+#endif
+
 #endif
 
 /* flags passed to assembly suspend code */

+ 14 - 0
arch/sh/include/cpu-sh4/cpu/sh7722.h

@@ -221,4 +221,18 @@ enum {
 	GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5,
 };
 
+enum {
+	HWBLK_UNKNOWN = 0,
+	HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_URAM, HWBLK_XYMEM,
+	HWBLK_INTC, HWBLK_DMAC, HWBLK_SHYWAY, HWBLK_HUDI,
+	HWBLK_UBC, HWBLK_TMU, HWBLK_CMT, HWBLK_RWDT, HWBLK_FLCTL,
+	HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SIO,
+	HWBLK_SIOF0, HWBLK_SIOF1, HWBLK_IIC, HWBLK_RTC,
+	HWBLK_TPU, HWBLK_IRDA, HWBLK_SDHI, HWBLK_SIM, HWBLK_KEYSC,
+	HWBLK_TSIF, HWBLK_USBF, HWBLK_2DG, HWBLK_SIU, HWBLK_VOU,
+	HWBLK_JPU, HWBLK_BEU, HWBLK_CEU, HWBLK_VEU, HWBLK_VPU,
+	HWBLK_LCDC,
+	HWBLK_NR,
+};
+
 #endif /* __ASM_SH7722_H__ */

+ 1 - 1
arch/sh/kernel/cpu/Makefile

@@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
 
-obj-y	+= irq/ init.o clock.o
+obj-y	+= irq/ init.o clock.o hwblk.o

+ 130 - 0
arch/sh/kernel/cpu/hwblk.c

@@ -0,0 +1,130 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <asm/clock.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+
+static void hwblk_area_inc(struct hwblk_info *info, int area)
+{
+	struct hwblk_area *hap = info->areas + area;
+
+	hap->cnt++;
+	if (hap->cnt == 1)
+		if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+			hwblk_area_inc(info, hap->parent);
+}
+
+static void hwblk_area_dec(struct hwblk_info *info, int area)
+{
+	struct hwblk_area *hap = info->areas + area;
+
+	if (hap->cnt == 1)
+		if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+			hwblk_area_dec(info, hap->parent);
+	hap->cnt--;
+}
+
+static void hwblk_enable(struct hwblk_info *info, int hwblk)
+{
+	struct hwblk *hp = info->hwblks + hwblk;
+	unsigned long tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwblk_lock, flags);
+
+	hp->cnt++;
+	if (hp->cnt == 1) {
+		hwblk_area_inc(info, hp->area);
+
+		tmp = __raw_readl(hp->mstp);
+		tmp &= ~(1 << hp->bit);
+		__raw_writel(tmp, hp->mstp);
+	}
+
+	spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static void hwblk_disable(struct hwblk_info *info, int hwblk)
+{
+	struct hwblk *hp = info->hwblks + hwblk;
+	unsigned long tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwblk_lock, flags);
+
+	if (hp->cnt == 1) {
+		hwblk_area_dec(info, hp->area);
+
+		tmp = __raw_readl(hp->mstp);
+		tmp |= 1 << hp->bit;
+		__raw_writel(tmp, hp->mstp);
+	}
+	hp->cnt--;
+
+	spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static struct hwblk_info *hwblk_info;
+
+int __init hwblk_register(struct hwblk_info *info)
+{
+	hwblk_info = info;
+	return 0;
+}
+
+int __init __weak arch_hwblk_init(void)
+{
+	return 0;
+}
+
+int __weak arch_hwblk_sleep_mode(void)
+{
+	return SUSP_SH_SLEEP;
+}
+
+int __init hwblk_init(void)
+{
+	return arch_hwblk_init();
+}
+
+/* allow clocks to enable and disable hardware blocks */
+static int sh_hwblk_clk_enable(struct clk *clk)
+{
+	if (!hwblk_info)
+		return -ENOENT;
+
+	hwblk_enable(hwblk_info, clk->arch_flags);
+	return 0;
+}
+
+static void sh_hwblk_clk_disable(struct clk *clk)
+{
+	if (hwblk_info)
+		hwblk_disable(hwblk_info, clk->arch_flags);
+}
+
+static struct clk_ops sh_hwblk_clk_ops = {
+	.enable		= sh_hwblk_clk_enable,
+	.disable	= sh_hwblk_clk_disable,
+	.recalc		= followparent_recalc,
+};
+
+int __init sh_hwblk_clk_register(struct clk *clks, int nr)
+{
+	struct clk *clkp;
+	int ret = 0;
+	int k;
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+		clkp->ops = &sh_hwblk_clk_ops;
+		ret |= clk_register(clkp);
+	}
+
+	return ret;
+}
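
hwblk_enable()/hwblk_disable() above are pure reference counting: the MSTP bit is written only on the 0-to-1 and 1-to-0 transitions, and the per-area counters propagate to parent areas so arch_hwblk_sleep_mode() can tell when a whole power domain is idle. The transition rule in isolation, as a standalone sketch:

	/* Standalone model of the counting above; the MMIO write is
	 * replaced by printf so this compiles and runs anywhere.
	 */
	#include <stdio.h>

	static unsigned long cnt;

	static void enable(void)
	{
		if (++cnt == 1)
			printf("clear MSTP bit - clock starts\n");
	}

	static void disable(void)
	{
		if (cnt-- == 1)
			printf("set MSTP bit - clock stops\n");
	}

	int main(void)
	{
		enable();	/* first user: bit cleared */
		enable();	/* nested user: no register access */
		disable();	/* one user left: no register access */
		disable();	/* last user: bit set */
		return 0;
	}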

+ 1 - 1
arch/sh/kernel/cpu/sh4a/Makefile

@@ -25,7 +25,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780)	:= clock-sh7780.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7785)	:= clock-sh7785.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7786)	:= clock-sh7786.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7343)	:= clock-sh7343.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7722)	:= clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7722)	:= clock-sh7722.o hwblk-sh7722.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7723)	:= clock-sh7723.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7724)	:= clock-sh7724.o
 clock-$(CONFIG_CPU_SUBTYPE_SH7366)	:= clock-sh7366.o

+ 32 - 28
arch/sh/kernel/cpu/sh4a/clock-sh7722.c

@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
 
 /* SH7722 registers */
 #define FRQCR		0xa4150000
@@ -140,35 +142,37 @@ struct clk div6_clks[] = {
 	SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
 	SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
 };
 };
 
 
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
-  SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
+#define R_CLK &r_clk
+#define P_CLK &div4_clks[DIV4_P]
+#define B_CLK &div4_clks[DIV4_B]
+#define U_CLK &div4_clks[DIV4_U]
 
 
 static struct clk mstp_clks[] = {
 static struct clk mstp_clks[] = {
-	MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
-	MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
-	MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
-	MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
-	MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
-	MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
-	MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
-	MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
-	MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
-
-	MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
-	MSTP("rtc0", &r_clk, MSTPCR1, 8, 0),
-
-	MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
-	MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
-	MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
-	MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0),
-	MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
-	MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
-	MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
-	MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
-	MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
-	MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
-	MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
-	MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+	SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
+	SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
+	SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
+	SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+	SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+	SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+	SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+	SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+	SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+
+	SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+	SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+	SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
+	SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+	SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
+	SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+	SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+	SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+	SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT),
+	SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+	SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+	SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT),
+	SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT),
+	SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
 };
 
 int __init arch_clk_init(void)
@@ -191,7 +195,7 @@ int __init arch_clk_init(void)
 		ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 
 	if (!ret)
-		ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+		ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 
 	return ret;
 }

+ 106 - 0
arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c

@@ -0,0 +1,106 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
+ *
+ * SH7722 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define MSTPCR0		0xa4150030
+#define MSTPCR1		0xa4150034
+#define MSTPCR2		0xa4150038
+
+/* SH7722 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7722_hwblk_area[] = {
+	[CORE_AREA] = HWBLK_AREA(0, 0),
+	[CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+	[SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7722_hwblk[HWBLK_NR] = {
+	[HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+	[HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+	[HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+	[HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+	[HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA),
+	[HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+	[HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+	[HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+	[HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+	[HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+	[HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA),
+	[HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+	[HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+	[HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+	[HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA),
+	[HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA),
+	[HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA),
+	[HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA),
+	[HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+	[HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+	[HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+	[HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+	[HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+	[HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+	[HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA),
+	[HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA),
+	[HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+	[HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA),
+	[HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA),
+	[HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+	[HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+	[HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+	[HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+	[HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+	[HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+	[HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+	[HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+	[HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7722_hwblk_info = {
+	.areas = sh7722_hwblk_area,
+	.nr_areas = ARRAY_SIZE(sh7722_hwblk_area),
+	.hwblks = sh7722_hwblk,
+	.nr_hwblks = ARRAY_SIZE(sh7722_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+	if (!sh7722_hwblk_area[CORE_AREA].cnt)
+		return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+	if (!sh7722_hwblk_area[CORE_AREA_BM].cnt)
+		return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+	return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+	return hwblk_register(&sh7722_hwblk_info);
+}

+ 1 - 0
arch/sh/kernel/cpu/shmobile/Makefile

@@ -4,3 +4,4 @@
 
 # Power Management & Sleep mode
 obj-$(CONFIG_PM)	+= pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE)	+= cpuidle.o

+ 102 - 0
arch/sh/kernel/cpu/shmobile/cpuidle.c

@@ -0,0 +1,102 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ *  Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/hwblk.h>
+
+static unsigned long cpuidle_mode[] = {
+	SUSP_SH_SLEEP, /* regular sleep mode */
+	SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+			       struct cpuidle_state *state)
+{
+	unsigned long allowed_mode = arch_hwblk_sleep_mode();
+	ktime_t before, after;
+	int requested_state = state - &dev->states[0];
+	int allowed_state;
+	int k;
+
+	/* convert allowed mode to allowed state */
+	for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+		if (cpuidle_mode[k] == allowed_mode)
+			break;
+
+	allowed_state = k;
+
+	/* take the following into account for sleep mode selection:
+	 * - allowed_state: best mode allowed by hardware (clock deps)
+	 * - requested_state: best mode allowed by software (latencies)
+	 */
+	k = min_t(int, allowed_state, requested_state);
+
+	dev->last_state = &dev->states[k];
+	before = ktime_get();
+	sh_mobile_call_standby(cpuidle_mode[k]);
+	after = ktime_get();
+	return ktime_to_ns(ktime_sub(after, before)) >> 10;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+	.name =		"sh_idle",
+	.owner =	THIS_MODULE,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+	struct cpuidle_device *dev = &cpuidle_dev;
+	struct cpuidle_state *state;
+	int i;
+
+	cpuidle_register_driver(&cpuidle_driver);
+
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		dev->states[i].name[0] = '\0';
+		dev->states[i].desc[0] = '\0';
+	}
+
+	i = CPUIDLE_DRIVER_STATE_START;
+
+	state = &dev->states[i++];
+	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+	state->exit_latency = 1;
+	state->target_residency = 1 * 2;
+	state->power_usage = 3;
+	state->flags = 0;
+	state->flags |= CPUIDLE_FLAG_SHALLOW;
+	state->flags |= CPUIDLE_FLAG_TIME_VALID;
+	state->enter = cpuidle_sleep_enter;
+
+	dev->safe_state = state;
+
+	state = &dev->states[i++];
+	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+	strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
+	state->exit_latency = 100;
+	state->target_residency = 1 * 2;
+	state->power_usage = 1;
+	state->flags = 0;
+	state->flags |= CPUIDLE_FLAG_TIME_VALID;
+	state->enter = cpuidle_sleep_enter;
+
+	dev->state_count = i;
+
+	cpuidle_register_device(dev);
+}
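
cpuidle_sleep_enter() above picks min(allowed_state, requested_state): the deepest state that both the hardware-block counters and the governor's latency constraints permit. The returned residency is the ktime delta shifted right by 10, i.e. a divide by 1024 standing in for nanoseconds-to-microseconds. A standalone illustration of that approximation:

	/* ns -> "us" via >> 10, as in the return above; about 2.4%
	 * smaller than a true divide by 1000.
	 */
	#include <stdio.h>

	int main(void)
	{
		long long ns = 5000000;	/* e.g. 5 ms spent sleeping */

		printf("approx us: %lld\n", ns >> 10);	/* 4882 */
		printf("exact  us: %lld\n", ns / 1000);	/* 5000 */
		return 0;
	}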

+ 13 - 13
arch/sh/kernel/cpu/shmobile/pm.c

@@ -1,5 +1,5 @@
 /*
- * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c
+ * arch/sh/kernel/cpu/shmobile/pm.c
  *
  * Power management support code for SuperH Mobile
  *
@@ -32,20 +32,17 @@
  *
  * R-standby mode is unsupported, but will be added in the future
  * U-standby mode is low priority since it needs bootloader hacks
- *
- * All modes should be tied in with cpuidle. But before that can
- * happen we need to keep track of enabled hardware blocks so we
- * can avoid entering sleep modes that stop clocks to hardware
- * blocks that are in use even though the cpu core is idle.
  */
 
+#define ILRAM_BASE 0xe5200000
+
 extern const unsigned char sh_mobile_standby[];
 extern const unsigned int sh_mobile_standby_size;
 
-static void sh_mobile_call_standby(unsigned long mode)
+void sh_mobile_call_standby(unsigned long mode)
 {
 	extern void *vbr_base;
-	void *onchip_mem = (void *)0xe5200000; /* ILRAM */
+	void *onchip_mem = (void *)ILRAM_BASE;
 	void (*standby_onchip_mem)(unsigned long) = onchip_mem;
 
 	/* Note: Wake up from sleep may generate exceptions!
@@ -55,11 +52,6 @@ static void sh_mobile_call_standby(unsigned long mode)
 	if (mode & SUSP_SH_SF)
 		asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory");
 
-	/* Copy the assembly snippet to the otherwise ununsed ILRAM */
-	memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
-	wmb();
-	ctrl_barrier();
-
 	/* Let assembly snippet in on-chip memory handle the rest */
 	standby_onchip_mem(mode);
 
@@ -85,7 +77,15 @@ static struct platform_suspend_ops sh_pm_ops = {
 
 static int __init sh_pm_init(void)
 {
+	void *onchip_mem = (void *)ILRAM_BASE;
+
+	/* Copy the assembly snippet to the otherwise ununsed ILRAM */
+	memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
+	wmb();
+	ctrl_barrier();
+
 	suspend_set_ops(&sh_pm_ops);
 	suspend_set_ops(&sh_pm_ops);
+	sh_mobile_setup_cpuidle();
 	return 0;
 	return 0;
 }
 }
 
 

+ 2 - 0
arch/sh/kernel/time.c

@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/rtc.h>
 #include <asm/clock.h>
+#include <asm/hwblk.h>
 #include <asm/rtc.h>
 
 /* Dummy RTC ops */
@@ -96,6 +97,7 @@ void __init time_init(void)
 	if (board_time_init)
 		board_time_init();
 
+	hwblk_init();
 	clk_init();
 
 	rtc_sh_get_time(&xtime);

+ 2 - 13
arch/x86/Kconfig

@@ -1913,25 +1913,14 @@ config DMAR_DEFAULT_ON
 	  recommended you say N here while the DMAR code remains
 	  experimental.
 
-config DMAR_GFX_WA
-	def_bool y
-	prompt "Support for Graphics workaround"
-	depends on DMAR
-	---help---
-	  Current Graphics drivers tend to use physical address
-	  for DMA and avoid using DMA APIs. Setting this config
-	  option permits the IOMMU driver to set a unity map for
-	  all the OS-visible memory. Hence the driver can continue
-	  to use physical addresses for DMA.
-
 config DMAR_FLOPPY_WA
 	def_bool y
 	depends on DMAR
 	---help---
-	  Floppy disk drivers are know to bypass DMA API calls
+	  Floppy disk drivers are known to bypass DMA API calls
 	  thereby failing to work when IOMMU is enabled. This
 	  workaround will setup a 1:1 mapping for the first
-	  16M to make floppy (an ISA device) work.
+	  16MiB to make floppy (an ISA device) work.
 
 config INTR_REMAP
 	bool "Support for Interrupt Remapping (EXPERIMENTAL)"

+ 1 - 1
arch/x86/kernel/pci-dma.c

@@ -211,11 +211,11 @@ static __init int iommu_setup(char *p)
 #ifdef CONFIG_SWIOTLB
 		if (!strncmp(p, "soft", 4))
 			swiotlb = 1;
+#endif
 		if (!strncmp(p, "pt", 2)) {
 			iommu_pass_through = 1;
 			return 1;
 		}
-#endif
 
 		gart_parse_options(p);
 

File diff suppressed because it is too large
+ 323 - 302
drivers/pci/intel-iommu.c


+ 4 - 0
drivers/scsi/cxgb3i/cxgb3i_iscsi.c

@@ -13,6 +13,7 @@
 
 #include <linux/inet.h>
 #include <linux/crypto.h>
+#include <linux/if_vlan.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
 	struct cxgb3i_adapter *snic;
 	int i;
 
+	if (ndev->priv_flags & IFF_802_1Q_VLAN)
+		ndev = vlan_dev_real_dev(ndev);
+
 	read_lock(&cxgb3i_snic_rwlock);
 	list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
 		for (i = 0; i < snic->hba_cnt; i++) {

+ 4 - 4
drivers/scsi/fnic/fnic_main.c

@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 	 * limitation for the device.  Try 40-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
 				     "No usable DMA configuration "
 				     "aborting\n");
 			goto err_out_release_regions;
 		}
-		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
 				     "Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
 				     "Unable to obtain 40-bit DMA "

+ 2 - 5
drivers/scsi/fnic/fnic_scsi.c

@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 					  struct vnic_wq_copy *wq,
 					  struct fnic_io_req *io_req,
 					  struct scsi_cmnd *sc,
-					  u32 sg_count)
+					  int sg_count)
 {
 	struct scatterlist *sg;
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 	char msg[2];
 
 	if (sg_count) {
-		BUG_ON(sg_count < 0);
-		BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
-
 		/* For each SGE, create a device desc entry */
 		desc = io_req->sgl_list;
 		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	struct fnic *fnic;
 	struct vnic_wq_copy *wq;
 	int ret;
-	u32 sg_count;
+	int sg_count;
 	unsigned long flags;
 	unsigned long ptr;
 

+ 6 - 1
drivers/scsi/ibmvscsi/ibmvscsi.c

@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
 				MAX_INDIRECT_BUFS);
 			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
 		}
+
+		if (hostdata->madapter_info.os_type == 3) {
+			enable_fast_fail(hostdata);
+			return;
+		}
 	}
 
-	enable_fast_fail(hostdata);
+	send_srp_login(hostdata);
 }
 
 /**

+ 3 - 2
drivers/scsi/scsi_transport_fc.c

@@ -3670,13 +3670,14 @@ static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
 	int flagset;
+	unsigned long flags;
 
 	if (!rport->rqst_q)
 		return;
 
 	get_device(&rport->dev);
 
-	spin_lock(rport->rqst_q->queue_lock);
+	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
 	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
 	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 	if (flagset)
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 	__blk_run_queue(rport->rqst_q);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-	spin_unlock(rport->rqst_q->queue_lock);
+	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
 
 	put_device(&rport->dev);
 }
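
The switch to spin_lock_irqsave() matters because queue_lock can also be taken from interrupt context; holding it with interrupts enabled risks a self-deadlock when an IRQ arrives on the same CPU. The pattern, as a minimal sketch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		/* Disable local IRQs so a handler that takes the same
		 * lock cannot interrupt us while we hold it.
		 */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}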

+ 1 - 1
drivers/scsi/zalon.c

@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
 
 	host = ncr_attach(&zalon7xx_template, unit, &device);
 	if (!host)
-		goto fail;
+		return -ENODEV;
 
 	if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
 	  dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",

+ 4 - 3
drivers/serial/sh-sci.c

@@ -707,12 +707,13 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
 
 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
 {
-	unsigned short ssr_status, scr_status;
+	unsigned short ssr_status, scr_status, err_enabled;
 	struct uart_port *port = ptr;
 	irqreturn_t ret = IRQ_NONE;
 
 	ssr_status = sci_in(port, SCxSR);
 	scr_status = sci_in(port, SCSCR);
+	err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
 
 	/* Tx Interrupt */
 	if ((ssr_status & 0x0020) && (scr_status & SCI_CTRL_FLAGS_TIE))
@@ -721,10 +722,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
 	if ((ssr_status & 0x0002) && (scr_status & SCI_CTRL_FLAGS_RIE))
 		ret = sci_rx_interrupt(irq, ptr);
 	/* Error Interrupt */
-	if ((ssr_status & 0x0080) && (scr_status & SCI_CTRL_FLAGS_REIE))
+	if ((ssr_status & 0x0080) && err_enabled)
 		ret = sci_er_interrupt(irq, ptr);
 	/* Break Interrupt */
-	if ((ssr_status & 0x0010) && (scr_status & SCI_CTRL_FLAGS_REIE))
+	if ((ssr_status & 0x0010) && err_enabled)
 		ret = sci_br_interrupt(irq, ptr);
 
 	return ret;

+ 2 - 2
drivers/usb/host/Kconfig

@@ -337,10 +337,10 @@ config USB_R8A66597_HCD
 
 config SUPERH_ON_CHIP_R8A66597
 	boolean "Enable SuperH on-chip R8A66597 USB"
-	depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723)
+	depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724)
 	help
 	   This driver enables support for the on-chip R8A66597 in the
-	   SH7366 and SH7723 processors.
+	   SH7366, SH7723 and SH7724 processors.
 
 config USB_WHCI_HCD
 	tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"

+ 1 - 1
fs/btrfs/async-thread.c

@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 					   "btrfs-%s-%d", workers->name,
 					   "btrfs-%s-%d", workers->name,
 					   workers->num_workers + i);
 					   workers->num_workers + i);
 		if (IS_ERR(worker->task)) {
 		if (IS_ERR(worker->task)) {
-			kfree(worker);
 			ret = PTR_ERR(worker->task);
+			kfree(worker);
 			goto fail;
 		}
 
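The swap above fixes a use-after-free: PTR_ERR(worker->task) was evaluated after kfree(worker) had released the containing object. The rule in miniature (illustrative code, not btrfs):

	#include <stdlib.h>

	struct obj { long err; };

	static long take_error(struct obj *o)
	{
		long ret = o->err;	/* read the field first ... */

		free(o);		/* ... then free; the old order
					 * dereferenced freed memory */
		return ret;
	}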

+ 1 - 2
fs/btrfs/ctree.h

@@ -2074,8 +2074,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
-			*root);
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root,
 			struct extent_buffer *node,

+ 391 - 175
fs/btrfs/extent-tree.c

@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
 	return type;
 }
 
-static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
+static int find_next_key(struct btrfs_path *path, int level,
+			 struct btrfs_key *key)
 
 {
-	int level;
-	BUG_ON(!path->keep_locks);
-	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+	for (; level < BTRFS_MAX_LEVEL; level++) {
 		if (!path->nodes[level])
 			break;
-		btrfs_assert_tree_locked(path->nodes[level]);
 		if (path->slots[level] + 1 >=
 		    btrfs_header_nritems(path->nodes[level]))
 			continue;
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 		 * For simplicity, we just do not add new inline back
 		 * ref if there is any kind of item for this block
 		 */
-		if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
+		if (find_next_key(path, 0, &key) == 0 &&
+		    key.objectid == bytenr &&
 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
 			err = -EAGAIN;
 			goto out;
@@ -2697,7 +2696,7 @@ again:
 
 		printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
 		       ", %llu bytes_used, %llu bytes_reserved, "
-		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
+		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
 		       "%llu total\n", (unsigned long long)bytes,
 		       "%llu total\n", (unsigned long long)bytes,
 		       (unsigned long long)data_sinfo->bytes_delalloc,
 		       (unsigned long long)data_sinfo->bytes_delalloc,
 		       (unsigned long long)data_sinfo->bytes_used,
 		       (unsigned long long)data_sinfo->bytes_used,
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 	return buf;
 }
 
+#if 0
 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root, struct extent_buffer *leaf)
 {
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-#if 0
-
 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root,
 					struct btrfs_leaf_ref *ref)
@@ -4553,262 +4551,471 @@ out:
 }
 #endif
 
+struct walk_control {
+	u64 refs[BTRFS_MAX_LEVEL];
+	u64 flags[BTRFS_MAX_LEVEL];
+	struct btrfs_key update_progress;
+	int stage;
+	int level;
+	int shared_level;
+	int update_ref;
+	int keep_locks;
+};
+
+#define DROP_REFERENCE	1
+#define UPDATE_BACKREF	2
+
 /*
- * helper function for drop_subtree, this function is similar to
- * walk_down_tree. The main difference is that it checks reference
- * counts while tree blocks are locked.
+ * hepler to process tree block while walking down the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks
+ * reference count of the block. if the block is shared and
+ * we need update back refs for the subtree rooted at the
+ * block, this function changes wc->stage to UPDATE_BACKREF
+ *
+ * when wc->stage == UPDATE_BACKREF, this function updates
+ * back refs for pointers in the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
  */
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
-				   struct btrfs_path *path, int *level)
+				   struct btrfs_path *path,
+				   struct walk_control *wc)
 {
-	struct extent_buffer *next;
-	struct extent_buffer *cur;
-	struct extent_buffer *parent;
-	u64 bytenr;
-	u64 ptr_gen;
-	u64 refs;
-	u64 flags;
-	u32 blocksize;
+	int level = wc->level;
+	struct extent_buffer *eb = path->nodes[level];
+	struct btrfs_key key;
+	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
 	int ret;
 
-	cur = path->nodes[*level];
-	ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
-				       &refs, &flags);
-	BUG_ON(ret);
-	if (refs > 1)
-		goto out;
+	if (wc->stage == UPDATE_BACKREF &&
+	    btrfs_header_owner(eb) != root->root_key.objectid)
+		return 1;
 
-	BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+	/*
+	 * when reference count of tree block is 1, it won't increase
+	 * again. once full backref flag is set, we never clear it.
+	 */
+	if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+	    (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+		BUG_ON(!path->locks[level]);
+		ret = btrfs_lookup_extent_info(trans, root,
+					       eb->start, eb->len,
+					       &wc->refs[level],
+					       &wc->flags[level]);
+		BUG_ON(ret);
+		BUG_ON(wc->refs[level] == 0);
+	}
 
-	while (*level >= 0) {
-		cur = path->nodes[*level];
-		if (*level == 0) {
-			ret = btrfs_drop_leaf_ref(trans, root, cur);
-			BUG_ON(ret);
-			clean_tree_block(trans, root, cur);
-			break;
-		}
-		if (path->slots[*level] >= btrfs_header_nritems(cur)) {
-			clean_tree_block(trans, root, cur);
-			break;
+	if (wc->stage == DROP_REFERENCE &&
+	    wc->update_ref && wc->refs[level] > 1) {
+		BUG_ON(eb == root->node);
+		BUG_ON(path->slots[level] > 0);
+		if (level == 0)
+			btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
+		else
+			btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
+		if (btrfs_header_owner(eb) == root->root_key.objectid &&
+		    btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
+			wc->stage = UPDATE_BACKREF;
+			wc->shared_level = level;
 		}
+	}
 
-		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
-		blocksize = btrfs_level_size(root, *level - 1);
-		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+	if (wc->stage == DROP_REFERENCE) {
+		if (wc->refs[level] > 1)
+			return 1;
 
-		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
-		btrfs_tree_lock(next);
-		btrfs_set_lock_blocking(next);
+		if (path->locks[level] && !wc->keep_locks) {
+			btrfs_tree_unlock(eb);
+			path->locks[level] = 0;
+		}
+		return 0;
+	}
 
-		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
-					       &refs, &flags);
+	/* wc->stage == UPDATE_BACKREF */
+	if (!(wc->flags[level] & flag)) {
+		BUG_ON(!path->locks[level]);
+		ret = btrfs_inc_ref(trans, root, eb, 1);
 		BUG_ON(ret);
-		if (refs > 1) {
-			parent = path->nodes[*level];
-			ret = btrfs_free_extent(trans, root, bytenr,
-						blocksize, parent->start,
-						btrfs_header_owner(parent),
-						*level - 1, 0);
+		ret = btrfs_dec_ref(trans, root, eb, 0);
+		BUG_ON(ret);
+		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+						  eb->len, flag, 0);
+		BUG_ON(ret);
+		wc->flags[level] |= flag;
+	}
+
+	/*
+	 * the block is shared by multiple trees, so it's not good to
+	 * keep the tree lock
+	 */
+	if (path->locks[level] && level > 0) {
+		btrfs_tree_unlock(eb);
+		path->locks[level] = 0;
+	}
+	return 0;
+}
+
+/*
+ * hepler to process tree block while walking up the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function drops
+ * reference count on the block.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function changes
+ * wc->stage back to DROP_REFERENCE if we changed wc->stage
+ * to UPDATE_BACKREF previously while processing the block.
+ *
+ * NOTE: return value 1 means we should stop walking up.
+ */
+static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 struct walk_control *wc)
+{
+	int ret = 0;
+	int level = wc->level;
+	struct extent_buffer *eb = path->nodes[level];
+	u64 parent = 0;
+
+	if (wc->stage == UPDATE_BACKREF) {
+		BUG_ON(wc->shared_level < level);
+		if (level < wc->shared_level)
+			goto out;
+
+		BUG_ON(wc->refs[level] <= 1);
+		ret = find_next_key(path, level + 1, &wc->update_progress);
+		if (ret > 0)
+			wc->update_ref = 0;
+
+		wc->stage = DROP_REFERENCE;
+		wc->shared_level = -1;
+		path->slots[level] = 0;
+
+		/*
+		 * check reference count again if the block isn't locked.
+		 * we should start walking down the tree again if reference
+		 * count is one.
+		 */
+		if (!path->locks[level]) {
+			BUG_ON(level == 0);
+			btrfs_tree_lock(eb);
+			btrfs_set_lock_blocking(eb);
+			path->locks[level] = 1;
+
+			ret = btrfs_lookup_extent_info(trans, root,
+						       eb->start, eb->len,
+						       &wc->refs[level],
+						       &wc->flags[level]);
 			BUG_ON(ret);
-			path->slots[*level]++;
-			btrfs_tree_unlock(next);
-			free_extent_buffer(next);
-			continue;
+			BUG_ON(wc->refs[level] == 0);
+			if (wc->refs[level] == 1) {
+				btrfs_tree_unlock(eb);
+				path->locks[level] = 0;
+				return 1;
+			}
+		} else {
+			BUG_ON(level != 0);
 		}
+	}
 
-		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+	/* wc->stage == DROP_REFERENCE */
+	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
 
-		*level = btrfs_header_level(next);
-		path->nodes[*level] = next;
-		path->slots[*level] = 0;
-		path->locks[*level] = 1;
-		cond_resched();
+	if (wc->refs[level] == 1) {
+		if (level == 0) {
+			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+				ret = btrfs_dec_ref(trans, root, eb, 1);
+			else
+				ret = btrfs_dec_ref(trans, root, eb, 0);
+			BUG_ON(ret);
+		}
+		/* make block locked assertion in clean_tree_block happy */
+		if (!path->locks[level] &&
+		    btrfs_header_generation(eb) == trans->transid) {
+			btrfs_tree_lock(eb);
+			btrfs_set_lock_blocking(eb);
+			path->locks[level] = 1;
+		}
+		clean_tree_block(trans, root, eb);
+	}
+
+	if (eb == root->node) {
+		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+			parent = eb->start;
+		else
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(eb));
+	} else {
+		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+			parent = path->nodes[level + 1]->start;
+		else
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(path->nodes[level + 1]));
 	}
-out:
-	if (path->nodes[*level] == root->node)
-		parent = path->nodes[*level];
-	else
-		parent = path->nodes[*level + 1];
-	bytenr = path->nodes[*level]->start;
-	blocksize = path->nodes[*level]->len;
 
-	ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
-				btrfs_header_owner(parent), *level, 0);
+	ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
+				root->root_key.objectid, level, 0);
 	BUG_ON(ret);
+out:
+	wc->refs[level] = 0;
+	wc->flags[level] = 0;
+	return ret;
+}
+
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_path *path,
+				   struct walk_control *wc)
+{
+	struct extent_buffer *next;
+	struct extent_buffer *cur;
+	u64 bytenr;
+	u64 ptr_gen;
+	u32 blocksize;
+	int level = wc->level;
+	int ret;
+
+	while (level >= 0) {
+		cur = path->nodes[level];
+		BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
 
-	if (path->locks[*level]) {
-		btrfs_tree_unlock(path->nodes[*level]);
-		path->locks[*level] = 0;
+		ret = walk_down_proc(trans, root, path, wc);
+		if (ret > 0)
+			break;
+
+		if (level == 0)
+			break;
+
+		bytenr = btrfs_node_blockptr(cur, path->slots[level]);
+		blocksize = btrfs_level_size(root, level - 1);
+		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
+
+		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+		btrfs_tree_lock(next);
+		btrfs_set_lock_blocking(next);
+
+		level--;
+		BUG_ON(level != btrfs_header_level(next));
+		path->nodes[level] = next;
+		path->slots[level] = 0;
+		path->locks[level] = 1;
+		wc->level = level;
 	}
-	free_extent_buffer(path->nodes[*level]);
-	path->nodes[*level] = NULL;
-	*level += 1;
-	cond_resched();
 	return 0;
 }
 
-/*
- * helper for dropping snapshots.  This walks back up the tree in the path
- * to find the first node higher up where we haven't yet gone through
- * all the slots
- */
 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct btrfs_path *path,
-				 int *level, int max_level)
+				 struct walk_control *wc, int max_level)
 {
-	struct btrfs_root_item *root_item = &root->root_item;
-	int i;
-	int slot;
+	int level = wc->level;
 	int ret;
 
-	for (i = *level; i < max_level && path->nodes[i]; i++) {
-		slot = path->slots[i];
-		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
-			/*
-			 * there is more work to do in this level.
-			 * Update the drop_progress marker to reflect
-			 * the work we've done so far, and then bump
-			 * the slot number
-			 */
-			path->slots[i]++;
-			WARN_ON(*level == 0);
-			if (max_level == BTRFS_MAX_LEVEL) {
-				btrfs_node_key(path->nodes[i],
-					       &root_item->drop_progress,
-					       path->slots[i]);
-				root_item->drop_level = i;
-			}
-			*level = i;
+	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
+	while (level < max_level && path->nodes[level]) {
+		wc->level = level;
+		if (path->slots[level] + 1 <
+		    btrfs_header_nritems(path->nodes[level])) {
+			path->slots[level]++;
 			return 0;
 		} else {
-			struct extent_buffer *parent;
-
-			/*
-			 * this whole node is done, free our reference
-			 * on it and go up one level
-			 */
-			if (path->nodes[*level] == root->node)
-				parent = path->nodes[*level];
-			else
-				parent = path->nodes[*level + 1];
+			ret = walk_up_proc(trans, root, path, wc);
+			if (ret > 0)
+				return 0;
 
-			clean_tree_block(trans, root, path->nodes[i]);
-			ret = btrfs_free_extent(trans, root,
-						path->nodes[i]->start,
-						path->nodes[i]->len,
-						parent->start,
-						btrfs_header_owner(parent),
-						*level, 0);
-			BUG_ON(ret);
-			if (path->locks[*level]) {
-				btrfs_tree_unlock(path->nodes[i]);
-				path->locks[i] = 0;
+			if (path->locks[level]) {
+				btrfs_tree_unlock(path->nodes[level]);
+				path->locks[level] = 0;
 			}
-			free_extent_buffer(path->nodes[i]);
-			path->nodes[i] = NULL;
-			*level = i + 1;
+			free_extent_buffer(path->nodes[level]);
+			path->nodes[level] = NULL;
+			level++;
 		}
 	}
 	return 1;
 }
 
 /*
- * drop the reference count on the tree rooted at 'snap'.  This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
+ * drop a subvolume tree.
+ *
+ * this function traverses the tree freeing any blocks that only
+ * referenced by the tree.
+ *
+ * when a shared tree block is found. this function decreases its
+ * reference count by one. if update_ref is true, this function
+ * also make sure backrefs for the shared block and all lower level
+ * blocks are properly updated.
  */
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
-			*root)
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
 {
-	int ret = 0;
-	int wret;
-	int level;
 	struct btrfs_path *path;
-	int update_count;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *tree_root = root->fs_info->tree_root;
 	struct btrfs_root_item *root_item = &root->root_item;
 	struct btrfs_root_item *root_item = &root->root_item;
+	struct walk_control *wc;
+	struct btrfs_key key;
+	int err = 0;
+	int ret;
+	int level;
 
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
-	level = btrfs_header_level(root->node);
+	wc = kzalloc(sizeof(*wc), GFP_NOFS);
+	BUG_ON(!wc);
+
+	trans = btrfs_start_transaction(tree_root, 1);
+
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+		level = btrfs_header_level(root->node);
 		path->nodes[level] = btrfs_lock_root_node(root);
 		btrfs_set_lock_blocking(path->nodes[level]);
 		path->slots[level] = 0;
 		path->locks[level] = 1;
+		memset(&wc->update_progress, 0,
+		       sizeof(wc->update_progress));
 	} else {
-		struct btrfs_key key;
-		struct btrfs_disk_key found_key;
-		struct extent_buffer *node;
-
 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+		memcpy(&wc->update_progress, &key,
+		       sizeof(wc->update_progress));
+
 		level = root_item->drop_level;
+		BUG_ON(level == 0);
 		path->lowest_level = level;
-		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (wret < 0) {
-			ret = wret;
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		path->lowest_level = 0;
+		if (ret < 0) {
+			err = ret;
 			goto out;
 		}
-		node = path->nodes[level];
-		btrfs_node_key(node, &found_key, path->slots[level]);
-		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
-			       sizeof(found_key)));
+		btrfs_node_key_to_cpu(path->nodes[level], &key,
+				      path->slots[level]);
+		WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+
 		/*
 		 * unlock our path, this is safe because only this
 		 * function is allowed to delete this snapshot
 		 */
 		btrfs_unlock_up_safe(path, 0);
+
+		level = btrfs_header_level(root->node);
+		while (1) {
+			btrfs_tree_lock(path->nodes[level]);
+			btrfs_set_lock_blocking(path->nodes[level]);
+
+			ret = btrfs_lookup_extent_info(trans, root,
+						path->nodes[level]->start,
+						path->nodes[level]->len,
+						&wc->refs[level],
+						&wc->flags[level]);
+			BUG_ON(ret);
+			BUG_ON(wc->refs[level] == 0);
+
+			if (level == root_item->drop_level)
+				break;
+
+			btrfs_tree_unlock(path->nodes[level]);
+			WARN_ON(wc->refs[level] != 1);
+			level--;
+		}
 	}
+
+	wc->level = level;
+	wc->shared_level = -1;
+	wc->stage = DROP_REFERENCE;
+	wc->update_ref = update_ref;
+	wc->keep_locks = 0;
+
 	while (1) {
-		unsigned long update;
-		wret = walk_down_tree(trans, root, path, &level);
-		if (wret > 0)
+		ret = walk_down_tree(trans, root, path, wc);
+		if (ret < 0) {
+			err = ret;
 			break;
-		if (wret < 0)
-			ret = wret;
+		}
 
-		wret = walk_up_tree(trans, root, path, &level,
-				    BTRFS_MAX_LEVEL);
-		if (wret > 0)
+		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
+		if (ret < 0) {
+			err = ret;
 			break;
-		if (wret < 0)
-			ret = wret;
-		if (trans->transaction->in_commit ||
-		    trans->transaction->delayed_refs.flushing) {
-			ret = -EAGAIN;
+		}
+
+		if (ret > 0) {
+			BUG_ON(wc->stage != DROP_REFERENCE);
 			break;
 		}
-		for (update_count = 0; update_count < 16; update_count++) {
+
+		if (wc->stage == DROP_REFERENCE) {
+			level = wc->level;
+			btrfs_node_key(path->nodes[level],
+				       &root_item->drop_progress,
+				       path->slots[level]);
+			root_item->drop_level = level;
+		}
+
+		BUG_ON(wc->level == 0);
+		if (trans->transaction->in_commit ||
+		    trans->transaction->delayed_refs.flushing) {
+			ret = btrfs_update_root(trans, tree_root,
+						&root->root_key,
+						root_item);
+			BUG_ON(ret);
+
+			btrfs_end_transaction(trans, tree_root);
+			trans = btrfs_start_transaction(tree_root, 1);
+		} else {
+			unsigned long update;
 			update = trans->delayed_ref_updates;
 			trans->delayed_ref_updates = 0;
 			if (update)
-				btrfs_run_delayed_refs(trans, root, update);
-			else
-				break;
+				btrfs_run_delayed_refs(trans, tree_root,
+						       update);
 		}
 	}
+	btrfs_release_path(root, path);
+	BUG_ON(err);
+
+	ret = btrfs_del_root(trans, tree_root, &root->root_key);
+	BUG_ON(ret);
+
+	free_extent_buffer(root->node);
+	free_extent_buffer(root->commit_root);
+	kfree(root);
 out:
+	btrfs_end_transaction(trans, tree_root);
+	kfree(wc);
 	btrfs_free_path(path);
-	return ret;
+	return err;
 }
 
+/*
+ * drop subtree rooted at tree block 'node'.
+ *
+ * NOTE: this function will unlock and release tree block 'node'
+ */
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root,
 			struct extent_buffer *node,
 			struct extent_buffer *parent)
 {
 	struct btrfs_path *path;
+	struct walk_control *wc;
 	int level;
 	int level;
 	int parent_level;
 	int parent_level;
 	int ret = 0;
 	int ret = 0;
 	int wret;
 	int wret;
 
 
+	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
 	path = btrfs_alloc_path();
 	BUG_ON(!path);
 
+	wc = kzalloc(sizeof(*wc), GFP_NOFS);
+	BUG_ON(!wc);
+
 	btrfs_assert_tree_locked(parent);
 	parent_level = btrfs_header_level(parent);
 	extent_buffer_get(parent);
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 
 	btrfs_assert_tree_locked(node);
 	level = btrfs_header_level(node);
-	extent_buffer_get(node);
 	path->nodes[level] = node;
 	path->slots[level] = 0;
+	path->locks[level] = 1;
+
+	wc->refs[parent_level] = 1;
+	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+	wc->level = level;
+	wc->shared_level = -1;
+	wc->stage = DROP_REFERENCE;
+	wc->update_ref = 0;
+	wc->keep_locks = 1;
 
 	while (1) {
-		wret = walk_down_tree(trans, root, path, &level);
-		if (wret < 0)
+		wret = walk_down_tree(trans, root, path, wc);
+		if (wret < 0) {
 			ret = wret;
-		if (wret != 0)
 			break;
+		}
 
-		wret = walk_up_tree(trans, root, path, &level, parent_level);
+		wret = walk_up_tree(trans, root, path, wc, parent_level);
 		if (wret < 0)
 			ret = wret;
 		if (wret != 0)
 			break;
 	}
 
+	kfree(wc);
 	btrfs_free_path(path);
 	return ret;
 }
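
The rewritten loop above replaces the old -EAGAIN retry dance: whenever the transaction wants to commit, the walker now records drop_progress and drop_level in the root item, ends the transaction, and starts a fresh one, so a partially dropped snapshot can be resumed where it left off. Below is a minimal userspace sketch of that checkpoint-and-resume shape; it is not the kernel implementation, and all names in it are illustrative.

	/*
	 * Sketch: long-running work chopped into steps, with the current
	 * position saved whenever the "transaction" wants to end, so a
	 * restart resumes instead of starting over.
	 */
	#include <stdio.h>

	struct progress {
		int level;	/* models root_item->drop_level */
		int slot;	/* models root_item->drop_progress */
	};

	static struct progress saved;	/* stands in for the on-disk root item */

	static void checkpoint(struct progress *p)
	{
		saved = *p;	/* models btrfs_update_root() */
		printf("checkpoint: level=%d slot=%d\n", p->level, p->slot);
	}

	int main(void)
	{
		struct progress p = saved;	/* resume from last checkpoint */
		int steps = 0;

		while (p.slot < 100) {		/* models the walk_down/walk_up loop */
			p.slot++;		/* drop one subtree */
			if (++steps % 16 == 0)	/* models "transaction committing" */
				checkpoint(&p);
		}
		printf("done\n");
		return 0;
	}

The kernel persists the checkpoint via btrfs_update_root(); the sketch just copies a struct to model the durable save.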

+ 4 - 1
fs/btrfs/file.c

@@ -151,7 +151,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	}
 	if (end_pos > isize) {
 		i_size_write(inode, end_pos);
-		btrfs_update_inode(trans, root, inode);
+		/* we've only changed i_size in ram, and we haven't updated
+		 * the disk i_size.  There is no need to log the inode
+		 * at this time.
+		 */
 	}
 	err = btrfs_end_transaction(trans, root);
 out_unlock:
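
The hunk above drops a btrfs_update_inode() call because only the in-memory i_size changed; the on-disk i_size is brought up to date later, when the ordered I/O completes. A rough userspace sketch of that split between cached and persisted size follows; the names are assumed for illustration, this is not btrfs code.

	#include <stdio.h>

	struct file_state {
		long mem_isize;		/* what i_size_write() updates */
		long disk_isize;	/* what a later commit writes out */
	};

	static void extend(struct file_state *f, long end_pos)
	{
		if (end_pos > f->mem_isize)
			f->mem_isize = end_pos;	/* cheap, no I/O on the hot path */
	}

	static void flush(struct file_state *f)
	{
		f->disk_isize = f->mem_isize;	/* done once, at commit time */
	}

	int main(void)
	{
		struct file_state f = { 0, 0 };
		extend(&f, 4096);
		extend(&f, 8192);
		flush(&f);
		printf("mem=%ld disk=%ld\n", f.mem_isize, f.disk_isize);
		return 0;
	}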

+ 18 - 7
fs/btrfs/inode.c

@@ -3580,12 +3580,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 		owner = 1;
 	BTRFS_I(inode)->block_group =
 			btrfs_find_block_group(root, 0, alloc_hint, owner);
-	if ((mode & S_IFREG)) {
-		if (btrfs_test_opt(root, NODATASUM))
-			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
-		if (btrfs_test_opt(root, NODATACOW))
-			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
-	}
 
 	key[0].objectid = objectid;
 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -3640,6 +3634,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 
 	btrfs_inherit_iflags(inode, dir);
 
+	if ((mode & S_IFREG)) {
+		if (btrfs_test_opt(root, NODATASUM))
+			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+		if (btrfs_test_opt(root, NODATACOW))
+			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+	}
+
 	insert_inode_hash(inode);
 	inode_tree_add(inode);
 	return inode;
@@ -5082,6 +5083,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
 	struct extent_map *em;
 	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root;
 	int ret;
 
 	alloc_start = offset & ~mask;
@@ -5100,6 +5102,13 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 			goto out;
 	}
 
+	root = BTRFS_I(inode)->root;
+
+	ret = btrfs_check_data_free_space(root, inode,
+					  alloc_end - alloc_start);
+	if (ret)
+		goto out;
+
 	locked_end = alloc_end - 1;
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
@@ -5107,7 +5116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 		trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
 		if (!trans) {
 			ret = -EIO;
-			goto out;
+			goto out_free;
 		}
 
 		/* the extent lock is ordered inside the running
@@ -5168,6 +5177,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 		      GFP_NOFS);
 
 	btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+out_free:
+	btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
 out:
 	mutex_unlock(&inode->i_mutex);
 	return ret;
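
The fallocate hunks pair a data-space reservation with a release on every exit path: failures after the reservation jump to the new out_free label instead of out, and the success path falls through out_free as well, matching the diff. A small sketch of that goto-unwind pattern, with illustrative names rather than the kernel API:

	#include <stdio.h>

	static long reserved;

	static int reserve_space(long bytes)
	{
		reserved += bytes;
		return 0;	/* pretend the reservation succeeded */
	}

	static void release_space(long bytes)
	{
		reserved -= bytes;
	}

	static int do_fallocate(long bytes, int fail_midway)
	{
		int ret = reserve_space(bytes);
		if (ret)
			goto out;	/* nothing reserved yet, skip the release */

		if (fail_midway) {
			ret = -5;	/* models the -EIO !trans case above */
			goto out_free;
		}
		/* ... allocate extents here; success also falls through ... */
	out_free:
		release_space(bytes);	/* balances the reservation exactly once */
	out:
		return ret;
	}

	int main(void)
	{
		printf("ok=%d reserved=%ld\n", do_fallocate(4096, 0), reserved);
		printf("err=%d reserved=%ld\n", do_fallocate(4096, 1), reserved);
		return 0;
	}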

+ 4 - 2
fs/btrfs/ioctl.c

@@ -1028,7 +1028,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 						struct btrfs_file_extent_item);
 			comp = btrfs_file_extent_compression(leaf, extent);
 			type = btrfs_file_extent_type(leaf, extent);
-			if (type == BTRFS_FILE_EXTENT_REG) {
+			if (type == BTRFS_FILE_EXTENT_REG ||
+			    type == BTRFS_FILE_EXTENT_PREALLOC) {
 				disko = btrfs_file_extent_disk_bytenr(leaf,
 								      extent);
 				diskl = btrfs_file_extent_disk_num_bytes(leaf,
@@ -1051,7 +1052,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 			new_key.objectid = inode->i_ino;
 			new_key.offset = key.offset + destoff - off;
 
-			if (type == BTRFS_FILE_EXTENT_REG) {
+			if (type == BTRFS_FILE_EXTENT_REG ||
+			    type == BTRFS_FILE_EXTENT_PREALLOC) {
 				ret = btrfs_insert_empty_item(trans, root, path,
 							      &new_key, size);
 				if (ret)

+ 1 - 4
fs/btrfs/relocation.c

@@ -1788,7 +1788,7 @@ static void merge_func(struct btrfs_work *work)
 		btrfs_end_transaction(trans, root);
 	}
 
-	btrfs_drop_dead_root(reloc_root);
+	btrfs_drop_snapshot(reloc_root, 0);
 
 	if (atomic_dec_and_test(async->num_pending))
 		complete(async->done);
@@ -2075,9 +2075,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 
 			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
 			BUG_ON(ret);
-
-			btrfs_tree_unlock(eb);
-			free_extent_buffer(eb);
 		}
 		if (!lowest) {
 			btrfs_tree_unlock(upper->eb);
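
Per the comment added to btrfs_drop_subtree() earlier in this diff ("this function will unlock and release tree block 'node'"), do_relocation() must no longer unlock or free eb itself, which is why those two calls disappear here. A tiny userspace sketch of that ownership-handoff rule, with illustrative names only:

	#include <stdio.h>
	#include <stdlib.h>

	struct buf {
		int refs;
	};

	static void put_buf(struct buf *b)
	{
		if (--b->refs == 0) {
			printf("freed\n");
			free(b);
		}
	}

	/* Takes ownership of 'b': the caller's reference is dropped here,
	 * standing in for btrfs_drop_subtree() consuming 'node'. */
	static void consume_buf(struct buf *b)
	{
		/* ... work with b ... */
		put_buf(b);
	}

	int main(void)
	{
		struct buf *b = malloc(sizeof(*b));
		b->refs = 1;
		consume_buf(b);
		/* no put_buf(b) here -- a second drop is exactly what the
		 * removed unlock/free pair in the caller would have been */
		return 0;
	}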

+ 3 - 1
fs/btrfs/transaction.c

@@ -593,6 +593,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 	return 0;
 }
 
+#if 0
 /*
  * when dropping snapshots, we generate a ton of delayed refs, and it makes
  * sense not to join the transaction while it is trying to flush the current
@@ -681,6 +682,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root)
 	btrfs_btree_balance_dirty(tree_root, nr);
 	return ret;
 }
+#endif
 
 /*
  * new snapshots need to be created at a very specific time in the
@@ -1081,7 +1083,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
 	while (!list_empty(&list)) {
 		root = list_entry(list.next, struct btrfs_root, root_list);
 		list_del_init(&root->root_list);
-		btrfs_drop_dead_root(root);
+		btrfs_drop_snapshot(root, 0);
 	}
 	return 0;
 }

+ 3 - 0
fs/notify/inotify/inotify_user.c

@@ -296,12 +296,15 @@ static int inotify_fasync(int fd, struct file *file, int on)
 static int inotify_release(struct inode *ignored, struct file *file)
 {
 	struct fsnotify_group *group = file->private_data;
+	struct user_struct *user = group->inotify_data.user;
 
 	fsnotify_clear_marks_by_group(group);
 
 	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
 	fsnotify_put_group(group);
 
+	atomic_dec(&user->inotify_devs);
+
 	return 0;
 }
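
The inotify hunk plugs a leak of the per-user instance count: inotify_init() bumps user->inotify_devs, but release never dropped it, so repeated init/close cycles could eventually run a user into the instance limit. Note the user pointer is read before fsnotify_put_group() can tear the group down. A userspace sketch of the pairing, with modelled names rather than the fsnotify API:

	#include <stdio.h>

	struct user {
		int inotify_devs;	/* models the atomic_t instance count */
	};

	struct group {
		struct user *user;
	};

	static void init_instance(struct group *g, struct user *u)
	{
		g->user = u;
		u->inotify_devs++;	/* taken at init time */
	}

	static void release_instance(struct group *g)
	{
		struct user *u = g->user;	/* grab before g is torn down */

		g->user = NULL;			/* models fsnotify_put_group() */
		u->inotify_devs--;		/* the fix: balance the increment */
	}

	int main(void)
	{
		struct user u = { 0 };
		struct group g;

		init_instance(&g, &u);
		release_instance(&g);
		printf("inotify_devs=%d\n", u.inotify_devs);	/* back to 0 */
		return 0;
	}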

Some files were not shown because too many files changed in this diff