
Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (28 commits)
  ARM: 6411/1: vexpress: set RAM latencies to 1 cycle for PL310 on ct-ca9x4 tile
  ARM: 6409/1: davinci: map sram using MT_MEMORY_NONCACHED instead of MT_DEVICE
  ARM: 6408/1: omap: Map only available sram memory
  ARM: 6407/1: mmu: Setup MT_MEMORY and MT_MEMORY_NONCACHED L1 entries
  ARM: pxa: remove pr_<level> uses of KERN_<level>
  ARM: pxa168fb: clear enable bit when not active
  ARM: pxa: fix cpu_is_pxa*() not expanding to zero when not configured
  ARM: pxa168: fix corrected reset vector
  ARM: pxa: Use PIO for PI2C communication on Palm27x
  ARM: pxa: Fix Vpac270 gpio_power for MMC
  ARM: 6401/1: plug a race in the alignment trap handler
  ARM: 6406/1: at91sam9g45: fix i2c bus speed
  leds: leds-ns2: fix locking
  ARM: dove: fix __io() definition to use bus based offset
  dmaengine: fix interrupt clearing for mv_xor
  ARM: kirkwood: Unbreak PCIe I/O port
  ARM: Fix build error when using KCONFIG_CONFIG
  ARM: 6383/1: Implement phys_mem_access_prot() to avoid attributes aliasing
  ARM: 6400/1: at91: fix arch_gettimeoffset fallout
  ARM: 6398/1: add proc info for ARM11MPCore/Cortex-A9 from ARM
  ...
Linus Torvalds 14 years ago
commit 6e029fe373

+ 26 - 1
arch/arm/Kconfig

@@ -271,7 +271,6 @@ config ARCH_AT91
 	bool "Atmel AT91"
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_CLK
-	select ARCH_USES_GETTIMEOFFSET
 	help
 	  This enables support for systems based on the Atmel AT91RM9200,
 	  AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
 	  ACTLR register. Note that setting specific bits in the ACTLR register
 	  may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+	bool "ARM errata: DMB operation may be faulty"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 742230 Cortex-A9
+	  (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+	  between two write operations may not ensure the correct visibility
+	  ordering of the two writes. This workaround sets a specific bit in
+	  the diagnostic register of the Cortex-A9 which causes the DMB
+	  instruction to behave as a DSB, ensuring the correct behaviour of
+	  the two writes.
+
+config ARM_ERRATA_742231
+	bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 742231 Cortex-A9
+	  (r2p0..r2p2) erratum. Under certain conditions, specific to the
+	  Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+	  accessing some data located in the same cache line, may get corrupted
+	  data due to bad handling of the address hazard when the line gets
+	  replaced from one of the CPUs at the same time as another CPU is
+	  accessing it. This workaround sets specific bits in the diagnostic
+	  register of the Cortex-A9 which reduces the linefill issuing
+	  capabilities of the processor.
+
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
 	depends on CACHE_L2X0 && ARCH_OMAP4

+ 1 - 1
arch/arm/boot/compressed/Makefile

@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
 	$(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@

+ 4 - 0
arch/arm/include/asm/pgtable.h

@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)

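The declaration above pairs with the definition added to arch/arm/mm/mmu.c later in this merge. For orientation only (this call site is paraphrased from drivers/char/mem.c and is not part of the patch): once __HAVE_PHYS_MEM_ACCESS_PROT is defined, the /dev/mem mmap path asks the architecture for the mapping protection instead of applying a fixed one, which is how the attribute aliasing named in commit 6383/1 is avoided.

	/* paraphrased from mmap_mem() in drivers/char/mem.c, for context only */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);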
+ 2 - 0
arch/arm/kernel/entry-common.S

@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
 

+ 2 - 2
arch/arm/mach-at91/at91sam9g45_devices.c

@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
 	.sda_is_open_drain	= 1,
 	.scl_pin		= AT91_PIN_PA21,
 	.scl_is_open_drain	= 1,
-	.udelay			= 2,		/* ~100 kHz */
+	.udelay			= 5,		/* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
 	.sda_is_open_drain	= 1,
 	.scl_pin		= AT91_PIN_PB11,
 	.scl_is_open_drain	= 1,
-	.udelay			= 2,		/* ~100 kHz */
+	.udelay			= 5,		/* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {

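A note on the numbers above: these buses use the bit-banged i2c-gpio driver, whose platform data documents udelay as the SCL half-period, giving a bus clock of roughly (500 / udelay) kHz. On that basis the old value contradicted its own comment:

	SCL ≈ 500 / udelay  kHz
	udelay = 2  ->  ~250 kHz  (faster than the advertised ~100 kHz)
	udelay = 5  ->  500 / 5 = 100 kHz  (matches the comment)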
+ 1 - 2
arch/arm/mach-davinci/dm355.c

@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm365.c

@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm644x.c

@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00008000),
 		.length		= SZ_16K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 1 - 2
arch/arm/mach-davinci/dm646x.c

@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
 		.virtual	= SRAM_VIRT,
 		.pfn		= __phys_to_pfn(0x00010000),
 		.length		= SZ_32K,
-		/* MT_MEMORY_NONCACHED requires supersection alignment */
-		.type		= MT_DEVICE,
+		.type		= MT_MEMORY_NONCACHED,
 	},
 };
 

+ 3 - 3
arch/arm/mach-dove/include/mach/io.h

@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT		0xffffffff
 
-#define __io(a)  ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
-				   DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a)		(a)
+#define __io(a)  	((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+						 DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a)	(a)
 
 #endif

+ 1 - 1
arch/arm/mach-kirkwood/include/mach/kirkwood.h

@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE	0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE	0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE	0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE	0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE		SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE	0xf2000000

+ 2 - 2
arch/arm/mach-kirkwood/pcie.c

@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
 	 * IORESOURCE_IO
 	 */
 	pp->res[0].name = "PCIe 0 I/O Space";
-	pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+	pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
 	pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
 	pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
 	 * IORESOURCE_IO
 	 */
 	pp->res[0].name = "PCIe 1 I/O Space";
-	pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+	pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
 	pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
 	pp->res[0].flags = IORESOURCE_IO;
 

+ 6 - 1
arch/arm/mach-mmp/include/mach/system.h

@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
 	cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-	cpu_reset(0);
+	if (cpu_is_pxa168())
+		cpu_reset(0xffff0000);
+	else
+		cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */

+ 1 - 2
arch/arm/mach-pxa/cpufreq-pxa2xx.c

@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
 	freqs.cpu = policy->cpu;
 
 	if (freq_debug)
-		pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-			 "(SDRAM %d Mhz)\n",
+		pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
 			 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
 			 (new_freq_mem / 2000) : (new_freq_mem / 1000));
 

+ 12 - 0
arch/arm/mach-pxa/include/mach/hardware.h

@@ -264,23 +264,35 @@
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id)				\
 	({						\
 		unsigned int _id = (id) >> 13 & 0x7;	\
 		_id <= 0x2;				\
 	 })
+#else
+#define __cpu_is_pxa2xx(id)	(0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id)				\
 	({						\
 		unsigned int _id = (id) >> 13 & 0x7;	\
 		_id == 0x3;				\
 	 })
+#else
+#define __cpu_is_pxa3xx(id)	(0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id)				\
 	({						\
 		unsigned int _id = (id) >> 4 & 0xfff;	\
 		_id == 0x683 || _id == 0x693;		\
 	 })
+#else
+#define __cpu_is_pxa93x(id)	(0)
+#endif
 
 #define cpu_is_pxa2xx()					\
 	({						\

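The effect of the new #else branches is that each __cpu_is_pxa*() helper collapses to the compile-time constant 0 when the corresponding CPU family is not configured, so guarded code is dropped by the compiler instead of pulling in symbols that do not exist in that configuration. A minimal sketch (the callee name is hypothetical, not from the patch):

	if (cpu_is_pxa3xx())		/* constant 0 when CONFIG_PXA3xx is unset */
		pxa3xx_board_setup();	/* hypothetical pxa3xx-only call: the whole
					 * branch is eliminated, so no undefined
					 * reference can appear at link time */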
+ 5 - 1
arch/arm/mach-pxa/palm27x.c

@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
 	},
 };
 
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+	.use_pio	= 1,
+};
+
 void __init palm27x_pmic_init(void)
 {
 	i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
-	pxa27x_set_i2c_power_info(NULL);
+	pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
 }
 #endif

+ 1 - 0
arch/arm/mach-pxa/vpac270.c

@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
 static struct pxamci_platform_data vpac270_mci_platform_data = {
 	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.gpio_power		= -1,
 	.gpio_card_detect	= GPIO53_VPAC270_SD_DETECT_N,
 	.gpio_card_ro		= GPIO52_VPAC270_SD_READONLY,
 	.detect_delay_ms	= 200,

+ 3 - 0
arch/arm/mach-u300/include/mach/gpio.h

@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
 extern int gpio_get_value(unsigned gpio);
 extern void gpio_set_value(unsigned gpio, int value);
 
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
 /* wrappers to sleep-enable the previous two functions */
 static inline unsigned gpio_to_irq(unsigned gpio)
 {

+ 7 - 1
arch/arm/mach-vexpress/ct-ca9x4.c

@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
 	int i;
 
 #ifdef CONFIG_CACHE_L2X0
-	l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+	void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+	/* set RAM latencies to 1 cycle for this core tile. */
+	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
 #endif
 
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

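Why writing 0 sets the latencies to 1 cycle: in the PL310 the tag/data RAM latency control fields encode the latency minus one (a TRM detail, not stated in the patch), so the zero writes above program 1-cycle setup/read/write latencies, which is what the commit summary for this core tile refers to:

	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);	/* field value 0 => 1 cycle */
	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);	/* field value 0 => 1 cycle */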
+ 17 - 2
arch/arm/mm/alignment.c

@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (ai_usermode & UM_SIGNAL)
 		force_sig(SIGBUS, current);
-	else
-		set_cr(cr_no_alignment);
+	else {
+		/*
+		 * We're about to disable the alignment trap and return to
+		 * user space.  But if an interrupt occurs before actually
+		 * reaching user space, then the IRQ vector entry code will
+		 * notice that we were still in kernel space and therefore
+		 * the alignment trap won't be re-enabled in that case as it
+		 * is presumed to be always on from kernel space.
+		 * Let's prevent that race by disabling interrupts here (they
+		 * are disabled on the way back to user space anyway in
+		 * entry-common.S) and disable the alignment trap only if
+		 * there is no work pending for this thread.
+		 */
+		raw_local_irq_disable();
+		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+			set_cr(cr_no_alignment);
+	}
 
 	return 0;
 }

+ 29 - 2
arch/arm/mm/mmu.c

@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
 	}
 
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)

+ 56 - 6
arch/arm/mm/proc-v7.S

@@ -186,13 +186,14 @@ cpu_v7_name:
  *	It is assumed that:
  *	- cache type register is implemented
  */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
 	mrc	p15, 0, r0, c1, c0, 1
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
 #endif
+__v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
 	and	r10, r0, #0xff000000		@ ARM?
 	teq	r10, #0x41000000
-	bne	2f
+	bne	3f
 	and	r5, r0, #0x00f00000		@ variant
 	and	r6, r0, #0x0000000f		@ revision
-	orr	r0, r6, r5, lsr #20-4		@ combine variant and revision
+	orr	r6, r6, r5, lsr #20-4		@ combine variant and revision
+	ubfx	r0, r0, #4, #12			@ primary part number
 
+	/* Cortex-A8 Errata */
+	ldr	r10, =0x00000c08		@ Cortex-A8 primary part number
+	teq	r0, r10
+	bne	2f
 #ifdef CONFIG_ARM_ERRATA_430973
 	teq	r5, #0x00100000			@ only present in r1p*
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
 	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
 	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
 	tsteq	r10, #1 << 22
 	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
 	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
 #endif
+	b	3f
+
+	/* Cortex-A9 Errata */
+2:	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
+	teq	r0, r10
+	bne	3f
+#ifdef CONFIG_ARM_ERRATA_742230
+	cmp	r6, #0x22			@ only present up to r2p2
+	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrle	r10, r10, #1 << 4		@ set bit #4
+	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orreq	r10, r10, #1 << 12		@ set bit #12
+	orreq	r10, r10, #1 << 22		@ set bit #22
+	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
-2:	mov	r10, #0
+3:	mov	r10, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 #endif
@@ -323,6 +350,29 @@ cpu_elf_name:
 
 	.section ".proc.info.init", #alloc, #execinstr
 
+	.type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+	.long	0x410fc090		@ Required ID value
+	.long	0xff0ffff0		@ Mask for ID
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_XN | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__v7_ca9mp_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_v7_name
+	.long	v7_processor_functions
+	.long	v7wbi_tlb_fns
+	.long	v6_user_fns
+	.long	v7_cache_fns
+	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
 	/*
 	 * Match any ARMv7 processor core.
 	 */

+ 12 - 21
arch/arm/plat-nomadik/timer.c

@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mach-nomadik/timer.c
+ *  linux/arch/arm/plat-nomadik/timer.c
  *
  * Copyright (C) 2008 STMicroelectronics
  * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
 		cr = readl(mtu_base + MTU_CR(1));
 		writel(0, mtu_base + MTU_LR(1));
 		writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
-		writel(0x2, mtu_base + MTU_IMSC);
+		writel(1 << 1, mtu_base + MTU_IMSC);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
 {
 	unsigned long rate;
 	struct clk *clk0;
-	struct clk *clk1;
-	u32 cr;
+	u32 cr = MTU_CRn_32BITS;
 
 	clk0 = clk_get_sys("mtu0", NULL);
 	BUG_ON(IS_ERR(clk0));
 
-	clk1 = clk_get_sys("mtu1", NULL);
-	BUG_ON(IS_ERR(clk1));
-
 	clk_enable(clk0);
-	clk_enable(clk1);
 
 	/*
-	 * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
-	 * use a divide-by-16 counter if it's more than 16MHz
+	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
+	 * for ux500.
+	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
+	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
+	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
+	 * with 16 gives too low timer resolution.
 	 */
-	cr = MTU_CRn_32BITS;;
 	rate = clk_get_rate(clk0);
-	if (rate > 16 << 20) {
+	if (rate > 32000000) {
 		rate /= 16;
 		cr |= MTU_CRn_PRESCALE_16;
 	} else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
 		pr_err("timer: failed to initialize clock source %s\n",
 		       nmdk_clksrc.name);
 
-	/* Timer 1 is used for events, fix according to rate */
-	cr = MTU_CRn_32BITS;
-	rate = clk_get_rate(clk1);
-	if (rate > 16 << 20) {
-		rate /= 16;
-		cr |= MTU_CRn_PRESCALE_16;
-	} else {
-		cr |= MTU_CRn_PRESCALE_1;
-	}
+	/* Timer 1 is used for events */
+
 	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
 
 	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
 	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
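Rough arithmetic behind the new comment (not part of the patch):

	2^32 / 32 MHz  ≈ 134 s    (longest one-shot a 32-bit counter holds at 32 MHz,
	                           the headroom behind the quoted "max 127s")
	2.4 MHz / 16   = 150 kHz  ->  ~6.7 µs per tick, hence no prescaling at 2.4 MHz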

+ 5 - 20
arch/arm/plat-omap/sram.c

@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
 	if (omap_sram_size == 0)
 		return;
 
-	if (cpu_is_omap24xx()) {
-		omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
-		base = OMAP2_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-	}
-
 	if (cpu_is_omap34xx()) {
-		omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
-		base = OMAP3_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
 		/*
 		 * SRAM must be marked as non-cached on OMAP3 since the
 		 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
 		omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
 	}
 
-	if (cpu_is_omap44xx()) {
-		omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
-		base = OMAP4_SRAM_PA;
-		base = ROUND_DOWN(base, PAGE_SIZE);
-		omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-	}
-	omap_sram_io_desc[0].length = 1024 * 1024;	/* Use section desc */
+	omap_sram_io_desc[0].virtual = omap_sram_base;
+	base = omap_sram_start;
+	base = ROUND_DOWN(base, PAGE_SIZE);
+	omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+	omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
 	iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
 
 	printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",

+ 1 - 1
drivers/dma/mv_xor.c

@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-	u32 val = (1 << (1 + (chan->idx * 16)));
+	u32 val = ~(1 << (chan->idx * 16));
 	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
 	__raw_writel(val, XOR_INTR_CAUSE(chan));
 }

+ 5 - 4
drivers/leds/leds-ns2.c

@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
 	int cmd_level;
 	int slow_level;
 
-	read_lock(&led_dat->rw_lock);
+	read_lock_irq(&led_dat->rw_lock);
 
 	cmd_level = gpio_get_value(led_dat->cmd);
 	slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
 		}
 	}
 
-	read_unlock(&led_dat->rw_lock);
+	read_unlock_irq(&led_dat->rw_lock);
 
 	return ret;
 }
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
 			     enum ns2_led_modes mode)
 {
 	int i;
+	unsigned long flags;
 
-	write_lock(&led_dat->rw_lock);
+	write_lock_irqsave(&led_dat->rw_lock, flags);
 
 	for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
 		if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
 		}
 	}
 
-	write_unlock(&led_dat->rw_lock);
+	write_unlock_irqrestore(&led_dat->rw_lock, flags);
 }
 
 static void ns2_led_set(struct led_classdev *led_cdev,

+ 2 - 2
drivers/video/pxa168fb.c

@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
 	 * Set bit to enable graphics DMA.
 	 */
 	x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
-	x |= fbi->active ? 0x00000100 : 0;
-	fbi->active = 0;
+	x &= ~CFG_GRA_ENA_MASK;
+	x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
 
 	/*
 	 * If we are in a pseudo-color mode, we need to enable
 	 * If we are in a pseudo-color mode, we need to enable