Merge branches 'x86/vt-d', 'arm/omap', 'core', 'x86/amd' and 'arm/smmu' into next

Joerg Roedel 12 years ago
100 changed files with 2750 additions and 424 deletions
  1. +22 -13    Documentation/DocBook/media/v4l/dev-codec.xml
  2. +1 -1      Documentation/DocBook/media/v4l/v4l2.xml
  3. +70 -0     Documentation/devicetree/bindings/iommu/arm,smmu.txt
  4. +1 -1      Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
  5. +3 -0      Documentation/sound/alsa/HD-Audio-Models.txt
  6. +6 -0      MAINTAINERS
  7. +1 -1      Makefile
  8. +11 -1     arch/arm/Kconfig
  9. +2 -1      arch/arm/boot/compressed/Makefile
  10. +1 -1     arch/arm/boot/dts/exynos5250-pinctrl.dtsi
  11. +2 -2     arch/arm/boot/dts/exynos5250.dtsi
  12. +1 -3     arch/arm/include/asm/cacheflush.h
  13. +4 -0     arch/arm/kernel/machine_kexec.c
  14. +37 -6    arch/arm/kernel/process.c
  15. +0 -13    arch/arm/kernel/smp.c
  16. +8 -0     arch/arm/mm/cache-v7.S
  17. +33 -0    arch/arm/mm/flush.c
  18. +5 -3     arch/arm/mm/mmu.c
  19. +2 -2     arch/arm/mm/proc-v7.S
  20. +1 -0     arch/arm64/kernel/perf_event.c
  21. +1 -0     arch/ia64/include/asm/irqflags.h
  22. +1 -0     arch/metag/include/asm/hugetlb.h
  23. +2 -3     arch/mn10300/include/asm/irqflags.h
  24. +3 -1     arch/mn10300/include/asm/smp.h
  25. +2 -2     arch/parisc/include/asm/mmzone.h
  26. +5 -0     arch/parisc/include/asm/pci.h
  27. +1 -0     arch/parisc/kernel/hardware.c
  28. +38 -38   arch/parisc/kernel/pacache.S
  29. +27 -0    arch/parisc/kernel/pci.c
  30. +1 -1     arch/parisc/mm/init.c
  31. +2 -1     arch/powerpc/kvm/booke.c
  32. +7 -1     arch/powerpc/mm/hugetlbpage.c
  33. +1 -0     arch/sparc/include/asm/Kbuild
  34. +1 -1     arch/sparc/include/asm/leon.h
  35. +1 -0     arch/sparc/include/asm/leon_amba.h
  36. +0 -6     arch/sparc/include/asm/linkage.h
  37. +2 -1     arch/sparc/kernel/ds.c
  38. +17 -37   arch/sparc/kernel/leon_kernel.c
  39. +3 -5     arch/sparc/kernel/leon_pci_grpci1.c
  40. +7 -0     arch/sparc/kernel/leon_pmc.c
  41. +1 -1     arch/sparc/kernel/setup_32.c
  42. +1 -1     arch/sparc/kernel/setup_64.c
  43. +8 -1     arch/sparc/mm/init_64.c
  44. +1 -1     arch/sparc/mm/tlb.c
  45. +7 -5     arch/sparc/prom/bootstr_32.c
  46. +8 -8     arch/sparc/prom/tree_64.c
  47. +2 -0     arch/tile/lib/exports.c
  48. +1 -1     arch/um/drivers/mconsole_kern.c
  49. +1 -0     arch/x86/Kconfig
  50. +32 -16   arch/x86/crypto/aesni-intel_asm.S
  51. +1 -1     arch/x86/ia32/ia32_aout.c
  52. +5 -0     arch/x86/include/asm/irq.h
  53. +2 -2     arch/x86/include/asm/microcode.h
  54. +1 -3     arch/x86/include/asm/nmi.h
  55. +1 -0     arch/x86/kernel/apic/hw_nmi.c
  56. +4 -4     arch/x86/kernel/cpu/mtrr/cleanup.c
  57. +1 -1     arch/x86/kernel/cpu/perf_event_intel.c
  58. +1 -0     arch/x86/kernel/kvmclock.c
  59. +0 -12    arch/x86/kernel/process.c
  60. +4 -4     arch/x86/kernel/smpboot.c
  61. +2 -3     arch/x86/kvm/x86.c
  62. +6 -1     arch/x86/platform/efi/efi.c
  63. +15 -6    drivers/acpi/acpi_lpss.c
  64. +20 -0    drivers/acpi/device_pm.c
  65. +2 -0     drivers/acpi/dock.c
  66. +1 -0     drivers/acpi/power.c
  67. +11 -5    drivers/acpi/resource.c
  68. +18 -9    drivers/base/firmware_class.c
  69. +5 -1     drivers/block/rbd.c
  70. +1 -0     drivers/clk/clk.c
  71. +5 -5     drivers/clk/samsung/clk-exynos5250.c
  72. +3 -2     drivers/clk/samsung/clk-pll.c
  73. +1 -1     drivers/clk/spear/spear3xx_clock.c
  74. +6 -5     drivers/clk/tegra/clk-tegra30.c
  75. +1 -2     drivers/gpu/drm/drm_prime.c
  76. +10 -3    drivers/gpu/drm/radeon/r600.c
  77. +24 -29   drivers/gpu/drm/radeon/radeon_device.c
  78. +8 -2     drivers/gpu/drm/radeon/radeon_fence.c
  79. +4 -2     drivers/gpu/drm/radeon/radeon_gart.c
  80. +7 -0     drivers/gpu/drm/radeon/radeon_ring.c
  81. +31 -17   drivers/gpu/drm/radeon/radeon_uvd.c
  82. +13 -0    drivers/iommu/Kconfig
  83. +1 -0     drivers/iommu/Makefile
  84. +54 -25   drivers/iommu/amd_iommu.c
  85. +1969 -0  drivers/iommu/arm-smmu.c
  86. +45 -41   drivers/iommu/iommu.c
  87. +8 -7     drivers/iommu/omap-iommu.c
  88. +1 -1     drivers/iommu/omap-iopgtable.h
  89. +2 -2     drivers/iommu/omap-iovmm.c
  90. +1 -1     drivers/irqchip/irq-gic.c
  91. +9 -3     drivers/media/Kconfig
  92. +1 -1     drivers/media/i2c/s5c73m3/s5c73m3-core.c
  93. +3 -4     drivers/media/pci/cx88/cx88-alsa.c
  94. +3 -5     drivers/media/pci/cx88/cx88-video.c
  95. +9 -0     drivers/media/platform/coda.c
  96. +15 -0    drivers/media/platform/davinci/vpbe_display.c
  97. +1 -2     drivers/media/platform/davinci/vpfe_capture.c
  98. +1 -1     drivers/media/platform/exynos4-is/fimc-is-regs.c
  99. +18 -30   drivers/media/platform/exynos4-is/fimc-is.c
  100. +0 -2    drivers/media/platform/exynos4-is/fimc-is.h

+ 22 - 13
Documentation/DocBook/media/v4l/dev-codec.xml

@@ -1,18 +1,27 @@
   <title>Codec Interface</title>
 
-  <note>
-    <title>Suspended</title>
+  <para>A V4L2 codec can compress, decompress, transform, or otherwise
+convert video data from one format into another format, in memory. Typically
+such devices are memory-to-memory devices (i.e. devices with the
+<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
+capability set).
+</para>
 
-    <para>This interface has been be suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with codec
-device interfaces.</para>
-  </note>
+  <para>A memory-to-memory video node acts just like a normal video node, but it
+supports both output (sending frames from memory to the codec hardware) and
+capture (receiving the processed frames from the codec hardware into memory)
+stream I/O. An application will have to setup the stream
+I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
+to start the codec.</para>
 
-  <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory.
-Applications send data to be converted to the driver through a
-&func-write; call, and receive the converted data through a
-&func-read; call. For efficiency a driver may also support streaming
-I/O.</para>
+  <para>Video compression codecs use the MPEG controls to setup their codec parameters
+(note that the MPEG controls actually support many more codecs than just MPEG).
+See <xref linkend="mpeg-controls"></xref>.</para>
 
-  <para>[to do]</para>
+  <para>Memory-to-memory devices can often be used as a shared resource: you can
+open the video node multiple times, each application setting up their own codec properties
+that are local to the file handle, and each can use it independently from the others.
+The driver will arbitrate access to the codec and reprogram it whenever another file
+handle gets access. This is different from the usual video node behavior where the video properties
+are global to the device (i.e. changing something through one file handle is visible
+through another file handle).</para>
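
As a rough illustration of the start-up sequence described in the new text, here is a minimal userspace sketch. The device path is a placeholder, and format/buffer negotiation (VIDIOC_S_FMT, VIDIOC_REQBUFS, VIDIOC_QBUF) is assumed to have happened already:

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        /* Start both sides of a memory-to-memory codec node. Sketch only:
         * /dev/video0 is hypothetical, and buffers are assumed queued.
         */
        int start_codec(void)
        {
                int fd = open("/dev/video0", O_RDWR);
                enum v4l2_buf_type out = V4L2_BUF_TYPE_VIDEO_OUTPUT;  /* frames to hardware */
                enum v4l2_buf_type cap = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* frames from hardware */

                if (fd < 0)
                        return -1;
                /* The codec only starts once both queues are streaming. */
                if (ioctl(fd, VIDIOC_STREAMON, &out) < 0 ||
                    ioctl(fd, VIDIOC_STREAMON, &cap) < 0)
                        return -1;
                return fd;
        }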

+ 1 - 1
Documentation/DocBook/media/v4l/v4l2.xml

@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.9</subtitle>
+ <subtitle>Revision 3.10</subtitle>
 
   <chapter id="common">
     &sub-common;

+ 70 - 0
Documentation/devicetree/bindings/iommu/arm,smmu.txt

@@ -0,0 +1,70 @@
+* ARM System MMU Architecture Implementation
+
+ARM SoCs may contain an implementation of the ARM System Memory
+Management Unit Architecture, which can be used to provide 1 or 2 stages
+of address translation to bus masters external to the CPU.
+
+The SMMU may also raise interrupts in response to various fault
+conditions.
+
+** System MMU required properties:
+
+- compatible    : Should be one of:
+
+                        "arm,smmu-v1"
+                        "arm,smmu-v2"
+                        "arm,mmu-400"
+                        "arm,mmu-500"
+
+                  depending on the particular implementation and/or the
+                  version of the architecture implemented.
+
+- reg           : Base address and size of the SMMU.
+
+- #global-interrupts : The number of global interrupts exposed by the
+                       device.
+
+- interrupts    : Interrupt list, with the first #global-interrupts entries
+                  corresponding to the global interrupts and any
+                  following entries corresponding to context interrupts,
+                  specified in order of their indexing by the SMMU.
+
+                  For SMMUv2 implementations, there must be exactly one
+                  interrupt per context bank. In the case of a single,
+                  combined interrupt, it must be listed multiple times.
+
+- mmu-masters   : A list of phandles to device nodes representing bus
+                  masters for which the SMMU can provide a translation
+                  and their corresponding StreamIDs (see example below).
+                  Each device node linked from this list must have a
+                  "#stream-id-cells" property, indicating the number of
+                  StreamIDs associated with it.
+
+** System MMU optional properties:
+
+- smmu-parent   : When multiple SMMUs are chained together, this
+                  property can be used to provide a phandle to the
+                  parent SMMU (that is the next SMMU on the path going
+                  from the mmu-masters towards memory) node for this
+                  SMMU.
+
+Example:
+
+        smmu {
+                compatible = "arm,smmu-v1";
+                reg = <0xba5e0000 0x10000>;
+                #global-interrupts = <2>;
+                interrupts = <0 32 4>,
+                             <0 33 4>,
+                             <0 34 4>, /* This is the first context interrupt */
+                             <0 35 4>,
+                             <0 36 4>,
+                             <0 37 4>;
+
+                /*
+                 * Two DMA controllers, the first with two StreamIDs (0xd01d
+                 * and 0xd01e) and the second with only one (0xd11c).
+                 */
+                mmu-masters = <&dma0 0xd01d 0xd01e>,
+                              <&dma1 0xd11c>;
+        };

+ 1 - 1
Documentation/devicetree/bindings/media/exynos-fimc-lite.txt

@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)
 
 Required properties:
 
-- compatible	: should be "samsung,exynos4212-fimc" for Exynos4212 and
+- compatible	: should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
 		  Exynos4412 SoCs;
 - reg		: physical base address and size of the device memory mapped
 		  registers;

+ 3 - 0
Documentation/sound/alsa/HD-Audio-Models.txt

@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
   alc271-dmic	Enable ALC271X digital mic workaround
   inv-dmic	Inverted internal mic workaround
   lenovo-dock   Enables docking station I/O for some Lenovos
+  dell-headset-multi	Headset jack, which can also be used as mic-in
+  dell-headset-dock	Headset jack (without mic-in), and also dock I/O
 
 ALC662/663/272
 ==============
@@ -42,6 +44,7 @@ ALC662/663/272
   asus-mode7	ASUS
   asus-mode8	ASUS
   inv-dmic	Inverted internal mic workaround
+  dell-headset-multi	Headset jack, which can also be used as mic-in
 
 ALC680
 ======

+ 6 - 0
MAINTAINERS

@@ -1310,6 +1310,12 @@ T:	git git://git.xilinx.com/linux-xlnx.git
 S:	Supported
 F:	arch/arm/mach-zynq/
 
+ARM SMMU DRIVER
+M:	Will Deacon <will.deacon@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/iommu/arm-smmu.c
+
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
 M:	Will Deacon <will.deacon@arm.com>

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*

+ 11 - 1
arch/arm/Kconfig

@@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369
 	   is not correctly implemented in PL310 as clean lines are not
 	   invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 643719 Cortex-A9 (prior to
+	  r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+	  register returns zero when it should return one. The workaround
+	  corrects this value, ensuring cache maintenance operations which use
+	  it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
 	depends on CPU_V7
@@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on (!SMP || HOTPLUG_CPU)
+	depends on (!SMP || PM_SLEEP_SMP)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

+ 2 - 1
arch/arm/boot/compressed/Makefile

@@ -116,7 +116,8 @@ targets       := vmlinux vmlinux.lds \
 
 # Make sure files are removed during clean
 extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+		 hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)

+ 1 - 1
arch/arm/boot/dts/exynos5250-pinctrl.dtsi

@@ -763,7 +763,7 @@
 		};
 	};
 
-	pinctrl@03680000 {
+	pinctrl@03860000 {
 		gpz: gpz {
 			gpio-controller;
 			#gpio-cells = <2>;

+ 2 - 2
arch/arm/boot/dts/exynos5250.dtsi

@@ -161,9 +161,9 @@
 		interrupts = <0 50 0>;
 	};
 
-	pinctrl_3: pinctrl@03680000 {
+	pinctrl_3: pinctrl@03860000 {
 		compatible = "samsung,exynos5250-pinctrl";
-		reg = <0x0368000 0x1000>;
+		reg = <0x03860000 0x1000>;
 		interrupts = <0 47 0>;
 	};
 

+ 1 - 3
arch/arm/include/asm/cacheflush.h

@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)

+ 4 - 0
arch/arm/kernel/machine_kexec.c

@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
+	if (num_online_cpus() > 1) {
+		pr_err("kexec: error: multiple CPUs still online\n");
+		return;
+	}
 
 	page_list = image->head & PAGE_MASK;
 

+ 37 - 6
arch/arm/kernel/process.c

@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
 
 __setup("reboot=", reboot_setup);
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-	smp_send_stop();
-#endif
+	disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	local_irq_disable();
 	while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	if (pm_power_off)
 		pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-	machine_shutdown();
+	smp_send_stop();
 
 	arm_pm_restart(reboot_mode, cmd);
 

+ 0 - 13
arch/arm/kernel/smp.c

@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-	unsigned int cpu;
-	for_each_cpu(cpu, mask)
-		platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
 	unsigned long timeout;
@@ -679,8 +668,6 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
-
-	smp_kill_cpus(&mask);
 }
 
 /*

+ 8 - 0
arch/arm/mm/cache-v7.S

@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
 	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
 	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
+	ALT_UP(moveq	pc, lr)			@ LoUU is zero, so nothing to do
+	ldreq	r1, =0x410fc090                 @ ID of ARM Cortex A9 r0p?
+	biceq	r2, r2, #0x0000000f             @ clear minor revision number
+	teqeq	r2, r1                          @ test for errata affected core and if so...
+	orreqs	r3, #(1 << 21)			@   fix LoUIS value (and set flags state to 'ne')
+#endif
 	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
 	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
 	moveq	pc, lr				@ return if level == 0

+ 33 - 0
arch/arm/mm/flush.c

@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
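
The export above exists so that code writing to page-cache pages through the kernel mapping can keep the data cache coherent. A minimal sketch of the calling pattern follows; the function and buffer names are illustrative, not from the patch:

        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Write into a page via its kernel mapping, then flush the dirty
         * kernel-side cache lines so other mappings observe the data.
         */
        static void fill_page(struct page *page, const void *src, size_t len)
        {
                void *dst = kmap(page);            /* pin a kernel mapping */

                memcpy(dst, src, len);
                flush_kernel_dcache_page(page);    /* now a real flush on ARM */
                kunmap(page);
        }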

+ 5 - 3
arch/arm/mm/mmu.c

@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
 			const struct mem_type *type)
 {
+	pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
 		phys += SECTION_SIZE;
 	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-	flush_pmd_entry(pmd);
+	flush_pmd_entry(p);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		 */
 		if (type->prot_sect &&
 				((addr | next | phys) & ~SECTION_MASK) == 0) {
-			map_init_section(pmd, addr, next, phys, type);
+			__map_init_section(pmd, addr, next, phys, type);
 		} else {
 			alloc_init_pte(pmd, addr, next,
 						__phys_to_pfn(phys), type);

+ 2 - 2
arch/arm/mm/proc-v7.S

@@ -409,8 +409,8 @@ __v7_ca9mp_proc_info:
 	 */
 	.type   __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
-	.long	0x562f5840
-	.long	0xfffffff0
+	.long	0x560f5800
+	.long	0xff0fff00
 	__v7_proc __v7_pj4b_setup
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
 

+ 1 - 0
arch/arm64/kernel/perf_event.c

@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 		return;
 	}
 
+	perf_callchain_store(entry, regs->pc);
 	tail = (struct frame_tail __user *)regs->regs[29];
 
 	while (entry->nr < PERF_MAX_STACK_DEPTH &&

+ 1 - 0
arch/ia64/include/asm/irqflags.h

@@ -11,6 +11,7 @@
 #define _ASM_IA64_IRQFLAGS_H
 
 #include <asm/pal.h>
+#include <asm/kregs.h>
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;

+ 1 - 0
arch/metag/include/asm/hugetlb.h

@@ -2,6 +2,7 @@
 #define _ASM_METAG_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,

+ 2 - 3
arch/mn10300/include/asm/irqflags.h

@@ -13,9 +13,8 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
-#ifndef __ASSEMBLY__
-#include <linux/smp.h>
-#endif
+/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
+#include <asm/smp.h>
 
 /*
  * interrupt control

+ 3 - 1
arch/mn10300/include/asm/smp.h

@@ -24,6 +24,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 #endif
 
 #ifdef CONFIG_SMP
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
 extern void smp_init_cpus(void);
 extern void smp_cache_interrupt(void);
 extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
 #ifndef __ASSEMBLY__
 
 static inline void smp_init_cpus(void) {}
+#define raw_smp_processor_id() 0
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */

+ 2 - 2
arch/parisc/include/asm/mmzone.h

@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
 
 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
 #define PFNNID_MAP_MAX  512     /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+extern signed char pfnnid_map[PFNNID_MAP_MAX];
 
 #ifndef CONFIG_64BIT
 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
 	i = pfn >> PFNNID_SHIFT;
 	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
 
-	return (int)pfnnid_map[i];
+	return pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)

+ 5 - 0
arch/parisc/include/asm/pci.h

@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+	enum pci_mmap_state mmap_state, int write_combine);
+
 #endif /* __ASM_PARISC_PCI_H */

+ 1 - 0
arch/parisc/kernel/hardware.c

@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
 	{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
 	{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
 	{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+	{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
 	{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
 	{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
 	{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 

+ 38 - 38
arch/parisc/kernel/pacache.S

@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
 #endif
 
 	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r1
+	ldw		R%dcache_stride(%r1), r31
 
 #ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
 #endif
 	add		%r28, %r25, %r25
-	sub		%r25, %r1, %r25
-
-
-1:      fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
+	sub		%r25, r31, %r25
+
+
+1:      fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
 	cmpb,COND(<<)		%r28, %r25,1b
-	fdc,m		%r1(%r28)
+	fdc,m		r31(%r28)
 
 	sync
 
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
 #endif
 
 	ldil		L%icache_stride, %r1
-	ldw		R%icache_stride(%r1), %r1
+	ldw		R%icache_stride(%r1), %r31
 
 #ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
 #endif
 	add		%r28, %r25, %r25
-	sub		%r25, %r1, %r25
+	sub		%r25, %r31, %r25
 
 
 	/* fic only has the type 26 form on PA1.1, requiring an
 	 * explicit space specification, so use %sr4 */
-1:      fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
+1:      fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
 	cmpb,COND(<<)	%r28, %r25,1b
-	fic,m		%r1(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
 
 	sync
 

+ 27 - 0
arch/parisc/kernel/pci.c

@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 }
 
 
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	unsigned long prot;
+
+	/*
+	 * I/O space can be accessed via normal processor loads and stores on
+	 * this platform but for now we elect not to do this and portable
+	 * drivers should not do this anyway.
+	 */
+	if (mmap_state == pci_mmap_io)
+		return -EINVAL;
+
+	if (write_combine)
+		return -EINVAL;
+
+	/*
+	 * Ignore write-combine; for now only return uncached mappings.
+	 */
+	prot = pgprot_val(vma->vm_page_prot);
+	prot |= _PAGE_NO_CACHE;
+	vma->vm_page_prot = __pgprot(prot);
+
+	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+		vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
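
With HAVE_PCI_MMAP now defined, user space can map PCI resources on parisc in the usual way, for example through the sysfs resource files. A hedged userspace sketch, with a placeholder device path:

        #include <fcntl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        /* Map BAR0 of a hypothetical device; the mapping comes back uncached,
         * matching the _PAGE_NO_CACHE choice in pci_mmap_page_range() above.
         */
        void *map_bar0(size_t len)
        {
                int fd = open("/sys/bus/pci/devices/0000:00:01.0/resource0", O_RDWR);
                void *regs;

                if (fd < 0)
                        return NULL;
                regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                close(fd);                         /* mapping survives the close */
                return regs == MAP_FAILED ? NULL : regs;
        }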

+ 1 - 1
arch/parisc/mm/init.c

@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 #endif
 
 static struct resource data_resource = {

+ 2 - 1
arch/powerpc/kvm/booke.c

@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		ret = s;
 		goto out;
 	}
-	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
 
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+	kvmppc_lazy_ee_enable();
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.

+ 7 - 1
arch/powerpc/mm/hugetlbpage.c

@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	do {
 		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
+		if (!is_hugepd(pmd)) {
+			/*
+			 * if it is not hugepd pointer, we should already find
+			 * it cleared.
+			 */
+			WARN_ON(!pmd_none_or_clear_bad(pmd));
 			continue;
+		}
 #ifdef CONFIG_PPC_FSL_BOOK3E
 		/*
 		 * Increment next by the size of the huge mapping since

+ 1 - 0
arch/sparc/include/asm/Kbuild

@@ -6,6 +6,7 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += local64.h
 generic-y += mutex.h
 generic-y += irq_regs.h

+ 1 - 1
arch/sparc/include/asm/leon.h

@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
 
 #ifdef CONFIG_SMP
 # define LEON3_IRQ_IPI_DEFAULT		13
-# define LEON3_IRQ_TICKER		(leon3_ticker_irq)
+# define LEON3_IRQ_TICKER		(leon3_gptimer_irq)
 # define LEON3_IRQ_CROSS_CALL		15
 #endif
 

+ 1 - 0
arch/sparc/include/asm/leon_amba.h

@@ -47,6 +47,7 @@ struct amba_prom_registers {
 #define LEON3_GPTIMER_LD 4
 #define LEON3_GPTIMER_IRQEN 8
 #define LEON3_GPTIMER_SEPIRQ 8
+#define LEON3_GPTIMER_TIMERS 0x7
 
 #define LEON23_REG_TIMER_CONTROL_EN    0x00000001 /* 1 = enable counting */
 /* 0 = hold scalar and counter */

+ 0 - 6
arch/sparc/include/asm/linkage.h

@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif

+ 2 - 1
arch/sparc/kernel/ds.c

@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
 		unsigned long len;
 
 		strcpy(full_boot_str, "boot ");
-		strcpy(full_boot_str + strlen("boot "), boot_command);
+		strlcpy(full_boot_str + strlen("boot "), boot_command,
+			sizeof(full_boot_str + strlen("boot ")));
 		len = strlen(full_boot_str);
 
 		if (reboot_data_supported) {

+ 17 - 37
arch/sparc/kernel/leon_kernel.c

@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);
 
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
-int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
 
 	leon_clear_profile_irq(cpu);
 
+	if (cpu == boot_cpu_id)
+		timer_interrupt(irq, NULL);
+
 	ce = &per_cpu(sparc32_clockevent, cpu);
 
 	irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
 	int icsel;
 	int ampopts;
 	int err;
+	u32 config;
 
 	sparc_config.get_cycles_offset = leon_cycles_offset;
 	sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@ void __init leon_init_timers(void)
 	LEON3_BYPASS_STORE_PA(
 			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
 
-#ifdef CONFIG_SMP
-	leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
-
-	if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-	      (1<<LEON3_GPTIMER_SEPIRQ))) {
-		printk(KERN_ERR "timer not configured with separate irqs\n");
-		BUG();
-	}
-
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
-				0);
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-				(((1000000/HZ) - 1)));
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-				0);
-#endif
-
 	/*
 	 * The IRQ controller may (if implemented) consist of multiple
 	 * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
 	if (eirq != 0)
 		leon_eirq_setup(eirq);
 
-	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-	if (err) {
-		printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
-		prom_halt();
-	}
-
 #ifdef CONFIG_SMP
 	{
 		unsigned long flags;
@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
 	}
 #endif
 
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
-			      LEON3_GPTIMER_EN |
-			      LEON3_GPTIMER_RL |
-			      LEON3_GPTIMER_LD |
-			      LEON3_GPTIMER_IRQEN);
+	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
+	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
+		leon3_gptimer_irq += leon3_gptimer_idx;
+	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
+		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
 
 #ifdef CONFIG_SMP
 	/* Install per-cpu IRQ handler for broadcasted ticker */
-	irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
+	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
 				    "per-cpu", 0);
 	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
-			  IRQF_PERCPU | IRQF_TIMER, "ticker",
-			  NULL);
+			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
+#else
+	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
+	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+#endif
 	if (err) {
-		printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
+		pr_err("Unable to attach timer IRQ%d\n", irq);
 		prom_halt();
 	}
-
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
 			      LEON3_GPTIMER_EN |
 			      LEON3_GPTIMER_RL |
 			      LEON3_GPTIMER_LD |
 			      LEON3_GPTIMER_IRQEN);
-#endif
 	return;
 bad:
 	printk(KERN_ERR "No Timer/irqctrl found\n");

+ 3 - 5
arch/sparc/kernel/leon_pci_grpci1.c

@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)
 
 	/* find device register base address */
 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-	regs = devm_request_and_ioremap(&ofdev->dev, res);
-	if (!regs) {
-		dev_err(&ofdev->dev, "io-regs mapping failed\n");
-		return -EADDRNOTAVAIL;
-	}
+	regs = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	/*
 	 * check that we're in Host Slot and that we can act as a Host Bridge

+ 7 - 0
arch/sparc/kernel/leon_pmc.c

@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
 	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
 	 */
 	register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+
+	/* Interrupts need to be enabled to not hang the CPU */
+	local_irq_enable();
+
 	__asm__ __volatile__ (
 		"wr	%%g0, %%asr19\n"
 		"lda	[%0] %1, %%g0\n"
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
  */
 void pmc_leon_idle(void)
 {
+	/* Interrupts need to be enabled to not hang the CPU */
+	local_irq_enable();
+
 	/* For systems without power-down, this will be no-op */
 	__asm__ __volatile__ ("wr	%g0, %asr19\n\t");
 }

+ 1 - 1
arch/sparc/kernel/setup_32.c

@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
-	strcpy(boot_command_line, *cmdline_p);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 	parse_early_param();
 
 	boot_flags_init(*cmdline_p);

+ 1 - 1
arch/sparc/kernel/setup_64.c

@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
-	strcpy(boot_command_line, *cmdline_p);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 	parse_early_param();
 
 	boot_flags_init(*cmdline_p);

+ 8 - 1
arch/sparc/mm/init_64.c

@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
 		m->size = *val;
 		val = mdesc_get_property(md, node,
 					 "address-congruence-offset", NULL);
-		m->offset = *val;
+
+		/* The address-congruence-offset property is optional.
+		 * Explicitly zero it to identify this case.
+		 */
+		if (val)
+			m->offset = *val;
+		else
+			m->offset = 0UL;
 
 		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
 			count - 1, m->base, m->size, m->offset);

+ 1 - 1
arch/sparc/mm/tlb.c

@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		global_flush_tlb_page(mm, vaddr);
 		flush_tsb_user_page(mm, vaddr);
+		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 

+ 7 - 5
arch/sparc/prom/bootstr_32.c

@@ -23,23 +23,25 @@ prom_getbootargs(void)
 		return barg_buf;
 	}
 
-	switch(prom_vers) {
+	switch (prom_vers) {
 	case PROM_V0:
 		cp = barg_buf;
 		/* Start from 1 and go over fd(0,0,0)kernel */
-		for(iter = 1; iter < 8; iter++) {
+		for (iter = 1; iter < 8; iter++) {
 			arg = (*(romvec->pv_v0bootargs))->argv[iter];
 			if (arg == NULL)
 				break;
-			while(*arg != 0) {
+			while (*arg != 0) {
 				/* Leave place for space and null. */
-				if(cp >= barg_buf + BARG_LEN-2){
+				if (cp >= barg_buf + BARG_LEN - 2)
 					/* We might issue a warning here. */
 					break;
-				}
 				*cp++ = *arg++;
 			}
 			*cp++ = ' ';
+			if (cp >= barg_buf + BARG_LEN - 1)
+				/* We might issue a warning here. */
+				break;
 		}
 		*cp = 0;
 		break;

+ 8 - 8
arch/sparc/prom/tree_64.c

@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
 	return prom_node_to_node("child", node);
 }
 
-inline phandle prom_getchild(phandle node)
+phandle prom_getchild(phandle node)
 {
 	phandle cnode;
 
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
 	return prom_node_to_node(prom_peer_name, node);
 }
 
-inline phandle prom_getsibling(phandle node)
+phandle prom_getsibling(phandle node)
 {
 	phandle sibnode;
 
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
  */
-inline int prom_getproplen(phandle node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
 {
 	unsigned long args[6];
 
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
  * was successful the length will be returned, else -1 is returned.
  */
-inline int prom_getproperty(phandle node, const char *prop,
-			    char *buffer, int bufsize)
+int prom_getproperty(phandle node, const char *prop,
+		     char *buffer, int bufsize)
 {
 	unsigned long args[8];
 	int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
  */
-inline int prom_getint(phandle node, const char *prop)
+int prom_getint(phandle node, const char *prop)
 {
 	int intprop;
 
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
-inline char *prom_firstprop(phandle node, char *buffer)
+char *prom_firstprop(phandle node, char *buffer)
 {
 	unsigned long args[7];
 
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
  * at node 'node' .  Returns NULL string if no more
  * property types for this node.
  */
-inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+char *prom_nextprop(phandle node, const char *oprop, char *buffer)
 {
 	unsigned long args[7];
 	char buf[32];

+ 2 - 0
arch/tile/lib/exports.c

@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashrdi3);
 uint64_t __ashldi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
 #endif

+ 1 - 1
arch/um/drivers/mconsole_kern.c

@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
 	}
 
 	do {
-		loff_t pos;
+		loff_t pos = file->f_pos;
 		mm_segment_t old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);

+ 1 - 0
arch/x86/Kconfig

@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
+	select BINFMT_ELF
 	select COMPAT_BINFMT_ELF
 	select HAVE_UID16
 	---help---

+ 32 - 16
arch/x86/crypto/aesni-intel_asm.S

@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
 	addq %rcx, KEYP
 
 	movdqa IV, STATE1
-	pxor 0x00(INP), STATE1
+	movdqu 0x00(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x10(INP), STATE2
+	movdqu 0x10(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x20(INP), STATE3
+	movdqu 0x20(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x30(INP), STATE4
+	movdqu 0x30(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
 	call *%r11
 
-	pxor 0x00(OUTP), STATE1
+	movdqu 0x00(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE1
-	pxor 0x40(INP), STATE1
+	movdqu 0x40(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x40(OUTP)
 
-	pxor 0x10(OUTP), STATE2
+	movdqu 0x10(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x50(INP), STATE2
+	movdqu 0x50(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x50(OUTP)
 
-	pxor 0x20(OUTP), STATE3
+	movdqu 0x20(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x60(INP), STATE3
+	movdqu 0x60(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x60(OUTP)
 
-	pxor 0x30(OUTP), STATE4
+	movdqu 0x30(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x30(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x70(INP), STATE4
+	movdqu 0x70(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x70(OUTP)
 
 	_aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
 
 	call *%r11
 
-	pxor 0x40(OUTP), STATE1
+	movdqu 0x40(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x40(OUTP)
 
-	pxor 0x50(OUTP), STATE2
+	movdqu 0x50(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x50(OUTP)
 
-	pxor 0x60(OUTP), STATE3
+	movdqu 0x60(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x60(OUTP)
 
-	pxor 0x70(OUTP), STATE4
+	movdqu 0x70(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x70(OUTP)
 
 	ret

+ 1 - 1
arch/x86/ia32/ia32_aout.c

@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
 	/* struct user */
 	DUMP_WRITE(&dump, sizeof(dump));
 	/* Now dump all of the user data.  Include malloced stuff as well */
-	DUMP_SEEK(PAGE_SIZE);
+	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
 	/* now we start writing out the user space info */
 	set_fs(USER_DS);
 	/* Dump the data area */

+ 5 - 0
arch/x86/include/asm/irq.h

@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */

+ 2 - 2
arch/x86/include/asm/microcode.h

@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;

+ 1 - 3
arch/x86/include/asm/nmi.h

@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST	1
 

+ 1 - 0
arch/x86/kernel/apic/hw_nmi.c

@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>

+ 4 - 4
arch/x86/kernel/cpu/mtrr/cleanup.c

@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
 	if (mtrr_tom2)
 		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
 	 * [0, 1M) should always be covered by var mtrr with WB
 	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
-	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 					1ULL<<(20 - PAGE_SHIFT));
-	/* Sort the ranges: */
-	sort_range(range, nr_range);
+	/* add from var mtrr at last */
+	nr_range = x86_get_mtrr_mem_range(range, nr_range,
+					  x_remove_base, x_remove_size);
 
 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM covered: %ldM\n",

+ 1 - 1
arch/x86/kernel/cpu/perf_event_intel.c

@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 

+ 1 - 0
arch/x86/kernel/kvmclock.c

@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
 	if (!mem)
 		return;
 	hv_clock = __va(mem);
+	memset(hv_clock, 0, size);
 
 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;

+ 0 - 12
arch/x86/kernel/process.c

@@ -277,18 +277,6 @@ void exit_idle(void)
 }
 #endif
 
-void arch_cpu_idle_prepare(void)
-{
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us.  CPU0 already has it initialized but no harm in
-	 * doing it again.  This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
 	local_touch_nmi();

+ 4 - 4
arch/x86/kernel/smpboot.c

@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-	bool has_mc = boot_cpu_data.x86_max_cores > 1;
 	bool has_smt = smp_num_siblings > 1;
+	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
 	int i;
 
 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-	if (!has_smt && !has_mc) {
+	if (!has_mp) {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		if ((i == cpu) || (has_smt && match_smt(c, o)))
 			link_mask(sibling, cpu, i);
 
-		if ((i == cpu) || (has_mc && match_llc(c, o)))
+		if ((i == cpu) || (has_mp && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);
 
 	}
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		o = &cpu_data(i);
 
-		if ((i == cpu) || (has_mc && match_mc(c, o))) {
+		if ((i == cpu) || (has_mp && match_mc(c, o))) {
 			link_mask(core, cpu, i);
 
 			/*

+ 2 - 3
arch/x86/kvm/x86.c

@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	if (index != XCR_XFEATURE_ENABLED_MASK)
 		return 1;
 	xcr0 = xcr;
-	if (kvm_x86_ops->get_cpl(vcpu) != 0)
-		return 1;
 	if (!(xcr0 & XSTATE_FP))
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-	if (__kvm_set_xcr(vcpu, index, xcr)) {
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+	    __kvm_set_xcr(vcpu, index, xcr)) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}

+ 6 - 1
arch/x86/platform/efi/efi.c

@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 		 * that by attempting to use more space than is available.
 		 */
 		unsigned long dummy_size = remaining_size + 1024;
-		void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+		void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+		if (!dummy)
+			return EFI_OUT_OF_RESOURCES;
 
 		status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
 					  EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 					 0, dummy);
 		}
 
+		kfree(dummy);
+
 		/*
 		 * The runtime code may now have triggered a garbage collection
 		 * run, so check the variable info again

+ 15 - 6
drivers/acpi/acpi_lpss.c

@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	if (dev_desc->clk_required) {
 		ret = register_device_clock(adev, pdata);
 		if (ret) {
-			/*
-			 * Skip the device, but don't terminate the namespace
-			 * scan.
-			 */
-			kfree(pdata);
-			return 0;
+			/* Skip the device, but continue the namespace scan. */
+			ret = 0;
+			goto err_out;
 		}
 	}
 
+	/*
+	 * This works around a known issue in ACPI tables where LPSS devices
+	 * have _PS0 and _PS3 without _PSC (and no power resources), so
+	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
+	 */
+	ret = acpi_device_fix_up_power(adev);
+	if (ret) {
+		/* Skip the device, but continue the namespace scan. */
+		ret = 0;
+		goto err_out;
+	}
+
 	adev->driver_data = pdata;
 	ret = acpi_create_platform_device(adev, id);
 	if (ret > 0)

+ 20 - 0
drivers/acpi/device_pm.c

@@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device)
 	return 0;
 }
 
+/**
+ * acpi_device_fix_up_power - Force device with missing _PSC into D0.
+ * @device: Device object whose power state is to be fixed up.
+ *
+ * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+ * are assumed to be put into D0 by the BIOS.  However, in some cases that may
+ * not be the case and this function should be used then.
+ */
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+	int ret = 0;
+
+	if (!device->power.flags.power_resources
+	    && !device->power.flags.explicit_get
+	    && device->power.state == ACPI_STATE_D0)
+		ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+	return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
 	struct acpi_device *device;
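
The intended call site for the new helper is the acpi_lpss.c hunk earlier in this merge; as a sketch, a hypothetical caller would force D0 right after enumeration:

        #include <linux/acpi.h>

        /* Hypothetical setup path: force a device without _PSC into D0
         * before touching it, mirroring the acpi_lpss.c change above.
         */
        static int example_setup(struct acpi_device *adev)
        {
                int ret = acpi_device_fix_up_power(adev);

                if (ret)
                        return ret;     /* skip the device if D0 entry failed */
                /* ... the device is now known to be in D0 ... */
                return 0;
        }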

+ 2 - 0
drivers/acpi/dock.c

@@ -868,8 +868,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
 	if (!count)
 		return -EINVAL;
 
+	acpi_scan_lock_acquire();
 	begin_undock(dock_station);
 	ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+	acpi_scan_lock_release();
 	return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);

+ 1 - 0
drivers/acpi/power.c

@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
 				ACPI_STA_DEFAULT);
 	mutex_init(&resource->resource_lock);
 	INIT_LIST_HEAD(&resource->dependent);
+	INIT_LIST_HEAD(&resource->list_node);
 	resource->name = device->pnp.bus_id;
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);

+ 11 - 5
drivers/acpi/resource.c

@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-				     u8 triggering, u8 polarity, u8 shareable)
+				     u8 triggering, u8 polarity, u8 shareable,
+				     bool legacy)
 {
 	int irq, p, t;
 
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
 	 * In IO-APIC mode, use overrided attribute. Two reasons:
 	 * 1. BIOS bug in DSDT
 	 * 2. BIOS uses IO-APIC mode Interrupt Source Override
+	 *
+	 * We do this only if we are dealing with IRQ() or IRQNoFlags()
+	 * resource (the legacy ISA resources). With modern ACPI 5 devices
+	 * using extended IRQ descriptors we take the IRQ configuration
+	 * from _CRS directly.
 	 */
-	if (!acpi_get_override_irq(gsi, &t, &p)) {
+	if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
 		u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 		u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
 		if (triggering != trig || polarity != pol) {
 			pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-				   t ? "edge" : "level", p ? "low" : "high");
+				   t ? "level" : "edge", p ? "low" : "high");
 			triggering = trig;
 			polarity = pol;
 		}
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 		}
 		acpi_dev_get_irqresource(res, irq->interrupts[index],
 					 irq->triggering, irq->polarity,
-					 irq->sharable);
+					 irq->sharable, true);
 		break;
 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 		ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 		}
 		acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
 					 ext_irq->triggering, ext_irq->polarity,
-					 ext_irq->sharable);
+					 ext_irq->sharable, false);
 		break;
 	default:
 		return false;

+ 18 - 9
drivers/base/firmware_class.c

@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
 	struct firmware_buf *buf = fw_priv->buf;
 
+	/*
+	 * There is a small window in which user can write to 'loading'
+	 * between loading done and disappearance of 'loading'
+	 */
+	if (test_bit(FW_STATUS_DONE, &buf->status))
+		return;
+
 	set_bit(FW_STATUS_ABORT, &buf->status);
 	complete_all(&buf->completion);
+
+	/* avoid user action after loading abort */
+	fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf)	\
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	int loading = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_priv->buf)
+		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	mutex_unlock(&fw_lock);
 
 	return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
 				      const char *buf, size_t count)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	struct firmware_buf *fw_buf = fw_priv->buf;
+	struct firmware_buf *fw_buf;
 	int loading = simple_strtol(buf, NULL, 10);
 	int i;
 
 	mutex_lock(&fw_lock);
-
+	fw_buf = fw_priv->buf;
 	if (!fw_buf)
 		goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
 			struct firmware_priv, timeout_work.work);
 
 	mutex_lock(&fw_lock);
-	if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-		mutex_unlock(&fw_lock);
-		return;
-	}
 	fw_load_abort(fw_priv);
 	mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
 	cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-	fw_priv->buf = NULL;
-
 	device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
 	device_remove_bin_file(f_dev, &firmware_attr_data);

+ 5 - 1
drivers/block/rbd.c

@@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	char *name;
 	u64 segment;
 	int ret;
+	char *name_format;
 
 	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
-	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+	name_format = "%s.%012llx";
+	if (rbd_dev->image_format == 2)
+		name_format = "%s.%016llx";
+	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
 			rbd_dev->header.object_prefix, segment);
 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
 		pr_err("error formatting segment name for #%llu (%d)\n",

+ 1 - 0
drivers/clk/clk.c

@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 		/* XXX the notifier code should handle this better */
 		if (!cn->notifier_head.head) {
 			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
 			kfree(cn);
 		}
 

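The one-line fix unlinks the notifier entry before freeing it; without list_del(), the global list keeps a pointer into freed memory. The same bug and fix in a minimal userspace doubly-linked list (hand-rolled here, not the kernel's list.h):

#include <stdlib.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static struct node head = { &head, &head };

static void list_add(struct node *n)
{
	n->next = head.next;
	n->prev = &head;
	head.next->prev = n;
	head.next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	list_add(n);
	list_del(n);	/* the missing step: unlink first ... */
	free(n);	/* ... then free, so no dangling list entry */
	printf("empty: %d\n", head.next == &head);
	return 0;
}
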
+ 5 - 5
drivers/clk/samsung/clk-exynos5250.c

@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
 
 /* list of all parent clock list */
 PNAME(mout_apll_p)	= { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p)	= { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p)	= { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p)	= { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p)	= { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p)	= { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-	MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-	MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+	MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+	MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
 	MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-	MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+	MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
 	MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
 	MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
 	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
 	GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
 	GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
 	GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
 	GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
 	GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),

+ 3 - 2
drivers/clk/samsung/clk-pll.c

@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 				unsigned long parent_rate)
 {
 	struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+	u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+	s16 kdiv;
 	u64 fvco = parent_rate;
 
 	pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
 	mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
 	pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
 	sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-	kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+	kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
 	fvco *= (mdiv << 16) + kdiv;
 	do_div(fvco, (pdiv << sdiv));

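KDIV is a signed 16-bit fractional divider, so the raw register field has to be sign-extended before it is folded into (mdiv << 16) + kdiv; treating 0xFFFF as 65535 instead of -1 skews the computed rate. A worked standalone example (the register value is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pll_con1 = 0x0000ffff;			/* hypothetical readback */
	uint32_t kdiv_u = pll_con1 & 0xffff;		/* old code: 65535 */
	int16_t  kdiv_s = (int16_t)(pll_con1 & 0xffff);	/* new code: -1 */
	uint32_t mdiv = 100;

	printf("unsigned: %llu\n",
	       (unsigned long long)((mdiv << 16) + kdiv_u));	/* 6619135 */
	printf("signed:   %llu\n",
	       (unsigned long long)((mdiv << 16) + kdiv_s));	/* 6553599 */
	return 0;
}
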
+ 1 - 1
drivers/clk/spear/spear3xx_clock.c

@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
 	clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)

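The stub now carries the same parameter as the real spear320_clk_init(), so the caller in spear3xx_clk_init() compiles whether or not the 320 support is built in. The general shape of that pattern, with a made-up feature macro:

#include <stdio.h>

#ifdef HAVE_FEATURE
static void feature_init(void *base) { printf("init %p\n", base); }
#else
/* the stub must take the same arguments as the real implementation */
static inline void feature_init(void *base) { (void)base; }
#endif

int main(void)
{
	feature_init((void *)0x1000);	/* compiles either way */
	return 0;
}
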
+ 6 - 5
drivers/clk/tegra/clk-tegra30.c

@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
 	clk_register_clkdev(clk, "afi", "tegra-pcie");
 	clks[afi] = clk;
 
+	/* pciex */
+	clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+				    74, &periph_u_regs, periph_clk_enb_refcnt);
+	clk_register_clkdev(clk, "pciex", "tegra-pcie");
+	clks[pciex] = clk;
+
 	/* kfuse */
 	clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
 				    TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
 				1, 0, &cml_lock);
 	clk_register_clkdev(clk, "cml1", NULL);
 	clks[cml1] = clk;
-
-	/* pciex */
-	clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-	clk_register_clkdev(clk, "pciex", NULL);
-	clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)

+ 1 - 2
drivers/gpu/drm/drm_prime.c

@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 		if (ret)
 			return ERR_PTR(ret);
 	}
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-			      0600);
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 

+ 10 - 3
drivers/gpu/drm/radeon/r600.c

@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
 	int i, j, r;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
 
 	/* raise clocks while booting up the VCPU */
 	radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
 	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
 			     (1 << 21) | (1 << 9) | (1 << 20));
 
-	/* disable byte swapping */
-	WREG32(UVD_LMI_SWAP_CNTL, 0);
-	WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
 	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(UVD_MPC_SET_MUXA1, 0x0);

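Computing the swap-control values into locals that default to zero keeps a single register-write path while letting big-endian builds override them at compile time. A sketch of the same shape; wreg32() here is a stand-in for the driver's WREG32():

#include <stdio.h>
#include <stdint.h>

static void wreg32(const char *reg, uint32_t val)
{
	printf("%s <- 0x%x\n", reg, val);
}

int main(void)
{
	uint32_t lmi_swap_cntl = 0;	/* default: byte swapping disabled */
	uint32_t mp_swap_cntl = 0;

#ifdef __BIG_ENDIAN
	lmi_swap_cntl = 0xa;		/* swap (8 in 32) RB and IB */
	mp_swap_cntl = 0;
#endif
	wreg32("UVD_LMI_SWAP_CNTL", lmi_swap_cntl);
	wreg32("UVD_MP_SWAP_CNTL", mp_swap_cntl);
	return 0;
}
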
+ 24 - 29
drivers/gpu/drm/radeon/radeon_device.c

@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-	int r;
-
-	if (rdev->wb.wb_obj) {
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0))
-			return;
-		radeon_bo_kunmap(rdev->wb.wb_obj);
-		radeon_bo_unpin(rdev->wb.wb_obj);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-	}
 	rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
 	radeon_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
 		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-	}
-	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-	if (unlikely(r != 0)) {
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-			  &rdev->wb.gpu_addr);
-	if (r) {
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		radeon_bo_unreserve(rdev->wb.wb_obj);
-		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-	radeon_bo_unreserve(rdev->wb.wb_obj);
-	if (r) {
-		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
 	}
 
 	/* clear wb memory */

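The reshuffle gives the writeback buffer a clean init/fini pairing: reserve, pin and map happen once when the object is created, radeon_wb_fini() undoes them in reverse, and radeon_wb_disable() merely clears the enable flag so suspend paths never touch the buffer object. In miniature, with hypothetical stand-ins for the driver types:

#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>

struct wb { void *obj; bool enabled; };

static int wb_init(struct wb *wb)
{
	if (!wb->obj) {			/* create + pin + map, once */
		wb->obj = malloc(4096);
		if (!wb->obj)
			return -1;
	}
	wb->enabled = true;
	return 0;
}

static void wb_disable(struct wb *wb)
{
	wb->enabled = false;		/* flag only; object untouched */
}

static void wb_fini(struct wb *wb)
{
	wb_disable(wb);
	if (wb->obj) {			/* unmap + unpin + free, here only */
		free(wb->obj);
		wb->obj = NULL;
	}
}

int main(void)
{
	struct wb wb = { 0 };

	if (!wb_init(&wb)) {
		wb_disable(&wb);	/* e.g. suspend */
		wb_fini(&wb);		/* e.g. driver unload */
	}
	printf("done\n");
	return 0;
}
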
+ 8 - 2
drivers/gpu/drm/radeon/radeon_fence.c

@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		*drv->cpu_addr = cpu_to_le32(seq);
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
 	} else {
 		WREG32(drv->scratch_reg, seq);
 	}
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 	u32 seq = 0;
 
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		seq = le32_to_cpu(*drv->cpu_addr);
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
 	} else {
 		seq = RREG32(drv->scratch_reg);
 	}

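Both paths now tolerate a vanished CPU mapping: writes are skipped and reads fall back to the last sequence number already tracked in the driver. A minimal sketch of the guarded read (names illustrative):

#include <stdio.h>
#include <stdint.h>

struct fence_drv {
	const uint32_t *cpu_addr;	/* NULL while the ring is suspended */
	uint64_t last_seq;
};

static uint32_t fence_read(const struct fence_drv *drv)
{
	if (drv->cpu_addr)
		return *drv->cpu_addr;
	return (uint32_t)drv->last_seq;	/* lower_32_bits() equivalent */
}

int main(void)
{
	uint32_t wb = 41;
	struct fence_drv drv = { &wb, 40 };

	printf("%u\n", fence_read(&drv));	/* 41: live readback */
	drv.cpu_addr = NULL;
	printf("%u\n", fence_read(&drv));	/* 40: fallback value */
	return 0;
}
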
+ 4 - 2
drivers/gpu/drm/radeon/radeon_gart.c

@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va)
 {
-	int r;
+	int r = 0;
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&bo_va->vm->mutex);
-	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	if (bo_va->soffset) {
+		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	}
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&bo_va->vm->mutex);

+ 7 - 0
drivers/gpu/drm/radeon/radeon_ring.c

@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		return -ENOMEM;
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* The ring is empty, so update the lockup info to avoid
+		 * a false positive.
+		 */
+		radeon_ring_lockup_update(ring);
+	}
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
 	while (ndw > (ring->ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev, ring);

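The empty-ring test compares free dwords against ring_size / 4 because, assuming ring_size is kept in bytes as the division suggests, a fully free ring holds exactly size-in-bytes / 4 dwords. A toy version of the check, with field names mirroring the hunk:

#include <stdio.h>
#include <stdbool.h>

struct ring { unsigned ring_size; unsigned ring_free_dw; };

static bool ring_is_empty(const struct ring *r)
{
	return r->ring_free_dw == r->ring_size / 4;
}

int main(void)
{
	struct ring r = { .ring_size = 4096, .ring_free_dw = 1024 };

	printf("%d\n", ring_is_empty(&r));	/* 1: nothing pending */
	r.ring_free_dw = 1000;
	printf("%d\n", ring_is_empty(&r));	/* 0: commands in flight */
	return 0;
}
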
+ 31 - 17
drivers/gpu/drm/radeon/radeon_uvd.c

@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
 	if (!r) {
 		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		rdev->uvd.cpu_addr = NULL;
+		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+		}
 		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+		if (rdev->uvd.cpu_addr) {
+			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+		} else {
+			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+		}
 	}
 	return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 		return r;
 	}
 
+	/* It was pinned in the CPU domain on suspend; unmap and unpin first */
+	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+	radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
 	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->uvd.gpu_addr);
 	if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD create msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000000;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
-	msg[4] = 0x00000000;
-	msg[5] = 0x00000000;
-	msg[6] = 0x00000000;
-	msg[7] = 0x00000780;
-	msg[8] = 0x00000440;
-	msg[9] = 0x00000000;
-	msg[10] = 0x01b37000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
 	for (i = 11; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD destroy msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000002;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
 	for (i = 4; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);

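Storing every message word with cpu_to_le32() makes the buffer layout byte-identical on little- and big-endian hosts, matching what the UVD block expects. The same idea with the userspace htole32() analogue (glibc, <endian.h>):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t msg[16];
	uint32_t handle = 1;
	int i;

	msg[0] = htole32(0x00000de4);
	msg[1] = htole32(0x00000000);
	msg[2] = htole32(handle);
	for (i = 3; i < 16; ++i)
		msg[i] = htole32(0x0);

	/* the byte view is identical on LE and BE hosts */
	printf("%02x %02x %02x %02x\n",
	       ((uint8_t *)msg)[0], ((uint8_t *)msg)[1],
	       ((uint8_t *)msg)[2], ((uint8_t *)msg)[3]);
	return 0;
}
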
+ 13 - 0
drivers/iommu/Kconfig

@@ -261,4 +261,17 @@ config SHMOBILE_IOMMU_L1SIZE
 	default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
 	default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
 
+config ARM_SMMU
+	bool "ARM Ltd. System MMU (SMMU) Support"
+	depends on ARM64 || (ARM_LPAE && OF)
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU if ARM
+	help
+	  Support for implementations of the ARM System MMU architecture
+	  versions 1 and 2. The driver supports both v7l and v8l table
+	  formats with 4k and 64k page sizes.
+
+	  Say Y here if your SoC includes an IOMMU device implementing
+	  the ARM SMMU architecture.
+
 endif # IOMMU_SUPPORT

+ 1 - 0
drivers/iommu/Makefile

@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o

+ 54 - 25
drivers/iommu/amd_iommu.c

@@ -1497,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
 			/* Large PTE found which maps this address */
 			unmap_size = PTE_PAGE_SIZE(*pte);
+
+			/* Only unmap from the first pte in the page */
+			if ((unmap_size - 1) & bus_addr)
+				break;
 			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
 			for (i = 0; i < count; i++)
 				pte[i] = 0ULL;
@@ -1506,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 		unmapped += unmap_size;
 	}
 
-	BUG_ON(!is_power_of_2(unmapped));
+	BUG_ON(unmapped && !is_power_of_2(unmapped));
 
 	return unmapped;
 }
@@ -1906,34 +1910,59 @@ static void domain_id_free(int id)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+#define DEFINE_FREE_PT_FN(LVL, FN)				\
+static void free_pt_##LVL (unsigned long __pt)			\
+{								\
+	unsigned long p;					\
+	u64 *pt;						\
+	int i;							\
+								\
+	pt = (u64 *)__pt;					\
+								\
+	for (i = 0; i < 512; ++i) {				\
+		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+			continue;				\
+								\
+		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+		FN(p);						\
+	}							\
+	free_page((unsigned long)pt);				\
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-	int i, j;
-	u64 *p1, *p2, *p3;
-
-	p1 = domain->pt_root;
-
-	if (!p1)
-		return;
-
-	for (i = 0; i < 512; ++i) {
-		if (!IOMMU_PTE_PRESENT(p1[i]))
-			continue;
-
-		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++j) {
-			if (!IOMMU_PTE_PRESENT(p2[j]))
-				continue;
-			p3 = IOMMU_PTE_PAGE(p2[j]);
-			free_page((unsigned long)p3);
-		}
+	unsigned long root = (unsigned long)domain->pt_root;
 
-		free_page((unsigned long)p2);
+	switch (domain->mode) {
+	case PAGE_MODE_NONE:
+		break;
+	case PAGE_MODE_1_LEVEL:
+		free_page(root);
+		break;
+	case PAGE_MODE_2_LEVEL:
+		free_pt_l2(root);
+		break;
+	case PAGE_MODE_3_LEVEL:
+		free_pt_l3(root);
+		break;
+	case PAGE_MODE_4_LEVEL:
+		free_pt_l4(root);
+		break;
+	case PAGE_MODE_5_LEVEL:
+		free_pt_l5(root);
+		break;
+	case PAGE_MODE_6_LEVEL:
+		free_pt_l6(root);
+		break;
+	default:
+		BUG();
 	}
-
-	free_page((unsigned long)p1);
-
-	domain->pt_root = NULL;
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)

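DEFINE_FREE_PT_FN generates one teardown function per page-table level, each walking its 512 entries and delegating to the level below, which lets free_pagetable() dispatch on domain->mode instead of hard-coding a three-level walk. A compilable userspace rendering of the idea, with presence testing reduced to a non-NULL check:

#include <stdlib.h>

#define ENTRIES 512

#define DEFINE_FREE_PT_FN(LVL, FN)			\
static void free_pt_##LVL(void *__pt)			\
{							\
	void **pt = __pt;				\
	int i;						\
							\
	for (i = 0; i < ENTRIES; ++i) {			\
		if (!pt[i])				\
			continue;			\
		FN(pt[i]);				\
	}						\
	free(pt);					\
}

DEFINE_FREE_PT_FN(l2, free)
DEFINE_FREE_PT_FN(l3, free_pt_l2)
DEFINE_FREE_PT_FN(l4, free_pt_l3)	/* the kernel chains up to l6 */

int main(void)
{
	void **l3 = calloc(ENTRIES, sizeof(void *));
	void **l2 = calloc(ENTRIES, sizeof(void *));

	if (!l3 || !l2)
		return 1;
	l2[0] = malloc(64);	/* stands in for one mapped page */
	l3[0] = l2;
	free_pt_l3(l3);		/* frees the page, the l2 table, then l3 */
	return 0;
}
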
+ 1969 - 0
drivers/iommu/arm-smmu.c

@@ -0,0 +1,1969 @@
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ *	- SMMUv1 and v2 implementations
+ *	- Stream-matching and stream-indexing
+ *	- v7/v8 long-descriptor format
+ *	- Non-secure access to the SMMU
+ *	- 4k and 64k pages, with contiguous pte hints.
+ *	- Up to 39-bit addressing
+ *	- Context fault reporting
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/amba/bus.h>
+
+#include <asm/pgalloc.h>
+
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS		8
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS		128
+
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS		128
+
+/* Number of VMIDs per SMMU */
+#define ARM_SMMU_NUM_VMIDS		256
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu)		((smmu)->base)
+#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
+
+/* Page table bits */
+#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
+#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
+#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
+#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
+#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
+
+#if PAGE_SIZE == SZ_4K
+#define ARM_SMMU_PTE_CONT_ENTRIES	16
+#elif PAGE_SIZE == SZ_64K
+#define ARM_SMMU_PTE_CONT_ENTRIES	32
+#else
+#define ARM_SMMU_PTE_CONT_ENTRIES	1
+#endif
+
+#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
+#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
+#define ARM_SMMU_PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
+
+/* Stage-1 PTE */
+#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
+
+/* Stage-2 PTE */
+#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
+#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
+#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
+#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0		0x0
+#define sCR0_CLIENTPD			(1 << 0)
+#define sCR0_GFRE			(1 << 1)
+#define sCR0_GFIE			(1 << 2)
+#define sCR0_GCFGFRE			(1 << 4)
+#define sCR0_GCFGFIE			(1 << 5)
+#define sCR0_USFCFG			(1 << 10)
+#define sCR0_VMIDPNE			(1 << 11)
+#define sCR0_PTM			(1 << 12)
+#define sCR0_FB				(1 << 13)
+#define sCR0_BSU_SHIFT			14
+#define sCR0_BSU_MASK			0x3
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0		0x20
+#define ARM_SMMU_GR0_ID1		0x24
+#define ARM_SMMU_GR0_ID2		0x28
+#define ARM_SMMU_GR0_ID3		0x2c
+#define ARM_SMMU_GR0_ID4		0x30
+#define ARM_SMMU_GR0_ID5		0x34
+#define ARM_SMMU_GR0_ID6		0x38
+#define ARM_SMMU_GR0_ID7		0x3c
+#define ARM_SMMU_GR0_sGFSR		0x48
+#define ARM_SMMU_GR0_sGFSYNR0		0x50
+#define ARM_SMMU_GR0_sGFSYNR1		0x54
+#define ARM_SMMU_GR0_sGFSYNR2		0x58
+#define ARM_SMMU_GR0_PIDR0		0xfe0
+#define ARM_SMMU_GR0_PIDR1		0xfe4
+#define ARM_SMMU_GR0_PIDR2		0xfe8
+
+#define ID0_S1TS			(1 << 30)
+#define ID0_S2TS			(1 << 29)
+#define ID0_NTS				(1 << 28)
+#define ID0_SMS				(1 << 27)
+#define ID0_PTFS_SHIFT			24
+#define ID0_PTFS_MASK			0x2
+#define ID0_PTFS_V8_ONLY		0x2
+#define ID0_CTTW			(1 << 14)
+#define ID0_NUMIRPT_SHIFT		16
+#define ID0_NUMIRPT_MASK		0xff
+#define ID0_NUMSMRG_SHIFT		0
+#define ID0_NUMSMRG_MASK		0xff
+
+#define ID1_PAGESIZE			(1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT		28
+#define ID1_NUMPAGENDXB_MASK		7
+#define ID1_NUMS2CB_SHIFT		16
+#define ID1_NUMS2CB_MASK		0xff
+#define ID1_NUMCB_SHIFT			0
+#define ID1_NUMCB_MASK			0xff
+
+#define ID2_OAS_SHIFT			4
+#define ID2_OAS_MASK			0xf
+#define ID2_IAS_SHIFT			0
+#define ID2_IAS_MASK			0xf
+#define ID2_UBS_SHIFT			8
+#define ID2_UBS_MASK			0xf
+#define ID2_PTFS_4K			(1 << 12)
+#define ID2_PTFS_16K			(1 << 13)
+#define ID2_PTFS_64K			(1 << 14)
+
+#define PIDR2_ARCH_SHIFT		4
+#define PIDR2_ARCH_MASK			0xf
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_STLBIALL		0x60
+#define ARM_SMMU_GR0_TLBIVMID		0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
+#define ARM_SMMU_GR0_TLBIALLH		0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC		0x70
+#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
+#define sTLBGSTATUS_GSACTIVE		(1 << 0)
+#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
+#define SMR_VALID			(1 << 31)
+#define SMR_MASK_SHIFT			16
+#define SMR_MASK_MASK			0x7fff
+#define SMR_ID_SHIFT			0
+#define SMR_ID_MASK			0x7fff
+
+#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
+#define S2CR_CBNDX_SHIFT		0
+#define S2CR_CBNDX_MASK			0xff
+#define S2CR_TYPE_SHIFT			16
+#define S2CR_TYPE_MASK			0x3
+#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
+#define CBAR_VMID_SHIFT			0
+#define CBAR_VMID_MASK			0xff
+#define CBAR_S1_MEMATTR_SHIFT		12
+#define CBAR_S1_MEMATTR_MASK		0xf
+#define CBAR_S1_MEMATTR_WB		0xf
+#define CBAR_TYPE_SHIFT			16
+#define CBAR_TYPE_MASK			0x3
+#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT		24
+#define CBAR_IRPTNDX_MASK		0xff
+
+#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
+#define CBA2R_RW64_32BIT		(0 << 0)
+#define CBA2R_RW64_64BIT		(1 << 0)
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)
+
+#define ARM_SMMU_CB_SCTLR		0x0
+#define ARM_SMMU_CB_RESUME		0x8
+#define ARM_SMMU_CB_TTBCR2		0x10
+#define ARM_SMMU_CB_TTBR0_LO		0x20
+#define ARM_SMMU_CB_TTBR0_HI		0x24
+#define ARM_SMMU_CB_TTBCR		0x30
+#define ARM_SMMU_CB_S1_MAIR0		0x38
+#define ARM_SMMU_CB_FSR			0x58
+#define ARM_SMMU_CB_FAR_LO		0x60
+#define ARM_SMMU_CB_FAR_HI		0x64
+#define ARM_SMMU_CB_FSYNR0		0x68
+
+#define SCTLR_S1_ASIDPNE		(1 << 12)
+#define SCTLR_CFCFG			(1 << 7)
+#define SCTLR_CFIE			(1 << 6)
+#define SCTLR_CFRE			(1 << 5)
+#define SCTLR_E				(1 << 4)
+#define SCTLR_AFE			(1 << 2)
+#define SCTLR_TRE			(1 << 1)
+#define SCTLR_M				(1 << 0)
+#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
+
+#define RESUME_RETRY			(0 << 0)
+#define RESUME_TERMINATE		(1 << 0)
+
+#define TTBCR_EAE			(1 << 31)
+
+#define TTBCR_PASIZE_SHIFT		16
+#define TTBCR_PASIZE_MASK		0x7
+
+#define TTBCR_TG0_4K			(0 << 14)
+#define TTBCR_TG0_64K			(1 << 14)
+
+#define TTBCR_SH0_SHIFT			12
+#define TTBCR_SH0_MASK			0x3
+#define TTBCR_SH_NS			0
+#define TTBCR_SH_OS			2
+#define TTBCR_SH_IS			3
+
+#define TTBCR_ORGN0_SHIFT		10
+#define TTBCR_IRGN0_SHIFT		8
+#define TTBCR_RGN_MASK			0x3
+#define TTBCR_RGN_NC			0
+#define TTBCR_RGN_WBWA			1
+#define TTBCR_RGN_WT			2
+#define TTBCR_RGN_WB			3
+
+#define TTBCR_SL0_SHIFT			6
+#define TTBCR_SL0_MASK			0x3
+#define TTBCR_SL0_LVL_2			0
+#define TTBCR_SL0_LVL_1			1
+
+#define TTBCR_T1SZ_SHIFT		16
+#define TTBCR_T0SZ_SHIFT		0
+#define TTBCR_SZ_MASK			0xf
+
+#define TTBCR2_SEP_SHIFT		15
+#define TTBCR2_SEP_MASK			0x7
+
+#define TTBCR2_PASIZE_SHIFT		0
+#define TTBCR2_PASIZE_MASK		0x7
+
+/* Common definitions for PASize and SEP fields */
+#define TTBCR2_ADDR_32			0
+#define TTBCR2_ADDR_36			1
+#define TTBCR2_ADDR_40			2
+#define TTBCR2_ADDR_42			3
+#define TTBCR2_ADDR_44			4
+#define TTBCR2_ADDR_48			5
+
+#define MAIR_ATTR_SHIFT(n)		((n) << 3)
+#define MAIR_ATTR_MASK			0xff
+#define MAIR_ATTR_DEVICE		0x04
+#define MAIR_ATTR_NC			0x44
+#define MAIR_ATTR_WBRWA			0xff
+#define MAIR_ATTR_IDX_NC		0
+#define MAIR_ATTR_IDX_CACHE		1
+#define MAIR_ATTR_IDX_DEV		2
+
+#define FSR_MULTI			(1 << 31)
+#define FSR_SS				(1 << 30)
+#define FSR_UUT				(1 << 8)
+#define FSR_ASF				(1 << 7)
+#define FSR_TLBLKF			(1 << 6)
+#define FSR_TLBMCF			(1 << 5)
+#define FSR_EF				(1 << 4)
+#define FSR_PF				(1 << 3)
+#define FSR_AFF				(1 << 2)
+#define FSR_TF				(1 << 1)
+
+#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
+					 FSR_TLBLKF)
+#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
+					 FSR_EF | FSR_PF | FSR_TF)
+
+#define FSYNR0_WNR			(1 << 4)
+
+struct arm_smmu_smr {
+	u8				idx;
+	u16				mask;
+	u16				id;
+};
+
+struct arm_smmu_master {
+	struct device_node		*of_node;
+
+	/*
+	 * The following is specific to the master's position in the
+	 * SMMU chain.
+	 */
+	struct rb_node			node;
+	int				num_streamids;
+	u16				streamids[MAX_MASTER_STREAMIDS];
+
+	/*
+	 * We only need to allocate these on the root SMMU, as we
+	 * configure unmatched streams to bypass translation.
+	 */
+	struct arm_smmu_smr		*smrs;
+};
+
+struct arm_smmu_device {
+	struct device			*dev;
+	struct device_node		*parent_of_node;
+
+	void __iomem			*base;
+	unsigned long			size;
+	unsigned long			pagesize;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
+	u32				features;
+	int				version;
+
+	u32				num_context_banks;
+	u32				num_s2_context_banks;
+	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+	atomic_t			irptndx;
+
+	u32				num_mapping_groups;
+	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+	unsigned long			input_size;
+	unsigned long			s1_output_size;
+	unsigned long			s2_output_size;
+
+	u32				num_global_irqs;
+	u32				num_context_irqs;
+	unsigned int			*irqs;
+
+	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
+
+	struct list_head		list;
+	struct rb_root			masters;
+};
+
+struct arm_smmu_cfg {
+	struct arm_smmu_device		*smmu;
+	u8				vmid;
+	u8				cbndx;
+	u8				irptndx;
+	u32				cbar;
+	pgd_t				*pgd;
+};
+
+struct arm_smmu_domain {
+	/*
+	 * A domain can span across multiple, chained SMMUs and requires
+	 * all devices within the domain to follow the same translation
+	 * path.
+	 */
+	struct arm_smmu_device		*leaf_smmu;
+	struct arm_smmu_cfg		root_cfg;
+	phys_addr_t			output_mask;
+
+	spinlock_t			lock;
+};
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+						struct device_node *dev_node)
+{
+	struct rb_node *node = smmu->masters.rb_node;
+
+	while (node) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+
+		if (dev_node < master->of_node)
+			node = node->rb_left;
+		else if (dev_node > master->of_node)
+			node = node->rb_right;
+		else
+			return master;
+	}
+
+	return NULL;
+}
+
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+			      struct arm_smmu_master *master)
+{
+	struct rb_node **new, *parent;
+
+	new = &smmu->masters.rb_node;
+	parent = NULL;
+	while (*new) {
+		struct arm_smmu_master *this;
+		this = container_of(*new, struct arm_smmu_master, node);
+
+		parent = *new;
+		if (master->of_node < this->of_node)
+			new = &((*new)->rb_left);
+		else if (master->of_node > this->of_node)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&master->node, parent, new);
+	rb_insert_color(&master->node, &smmu->masters);
+	return 0;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+				struct device *dev,
+				struct of_phandle_args *masterspec)
+{
+	int i;
+	struct arm_smmu_master *master;
+
+	master = find_smmu_master(smmu, masterspec->np);
+	if (master) {
+		dev_err(dev,
+			"rejecting multiple registrations for master device %s\n",
+			masterspec->np->name);
+		return -EBUSY;
+	}
+
+	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+		dev_err(dev,
+			"reached maximum number (%d) of stream IDs for master device %s\n",
+			MAX_MASTER_STREAMIDS, masterspec->np->name);
+		return -ENOSPC;
+	}
+
+	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->of_node		= masterspec->np;
+	master->num_streamids	= masterspec->args_count;
+
+	for (i = 0; i < master->num_streamids; ++i)
+		master->streamids[i] = masterspec->args[i];
+
+	return insert_smmu_master(smmu, master);
+}
+
+static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_device *parent;
+
+	if (!smmu->parent_of_node)
+		return NULL;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(parent, &arm_smmu_devices, list)
+		if (parent->dev->of_node == smmu->parent_of_node)
+			goto out_unlock;
+
+	parent = NULL;
+	dev_warn(smmu->dev,
+		 "Failed to find SMMU parent despite parent in DT\n");
+out_unlock:
+	spin_unlock(&arm_smmu_devices_lock);
+	return parent;
+}
+
+static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+	int idx;
+
+	do {
+		idx = find_next_zero_bit(map, end, start);
+		if (idx == end)
+			return -ENOSPC;
+	} while (test_and_set_bit(idx, map));
+
+	return idx;
+}
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+	clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+{
+	int count = 0;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
+	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
+	       & sTLBGSTATUS_GSACTIVE) {
+		cpu_relax();
+		if (++count == TLB_LOOP_TIMEOUT) {
+			dev_err_ratelimited(smmu->dev,
+			"TLB sync timed out -- SMMU may be deadlocked\n");
+			return;
+		}
+		udelay(1);
+	}
+}
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+	int flags, ret;
+	u32 fsr, far, fsynr, resume;
+	unsigned long iova;
+	struct iommu_domain *domain = dev;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *cb_base;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+
+	if (!(fsr & FSR_FAULT))
+		return IRQ_NONE;
+
+	if (fsr & FSR_IGN)
+		dev_err_ratelimited(smmu->dev,
+				    "Unexpected context fault (fsr 0x%u)\n",
+				    fsr);
+
+	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
+	iova = far;
+#ifdef CONFIG_64BIT
+	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
+	iova |= ((unsigned long)far << 32);
+#endif
+
+	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+		ret = IRQ_HANDLED;
+		resume = RESUME_RETRY;
+	} else {
+		ret = IRQ_NONE;
+		resume = RESUME_TERMINATE;
+	}
+
+	/* Clear the faulting FSR */
+	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* Retry or terminate any stalled transactions */
+	if (fsr & FSR_SS)
+		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+
+	return ret;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+	struct arm_smmu_device *smmu = dev;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+	dev_err_ratelimited(smmu->dev,
+		"Unexpected global fault, this could be serious\n");
+	dev_err_ratelimited(smmu->dev,
+		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+		gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+	return IRQ_NONE;
+}
+
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+{
+	u32 reg;
+	bool stage1;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *cb_base, *gr0_base, *gr1_base;
+
+	gr0_base = ARM_SMMU_GR0(smmu);
+	gr1_base = ARM_SMMU_GR1(smmu);
+	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+
+	/* CBAR */
+	reg = root_cfg->cbar |
+	      (root_cfg->vmid << CBAR_VMID_SHIFT);
+	if (smmu->version == 1)
+	      reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+	/* Use the weakest memory type, so it is overridden by the pte */
+	if (stage1)
+		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+
+	if (smmu->version > 1) {
+		/* CBA2R */
+#ifdef CONFIG_64BIT
+		reg = CBA2R_RW64_64BIT;
+#else
+		reg = CBA2R_RW64_32BIT;
+#endif
+		writel_relaxed(reg,
+			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));
+
+		/* TTBCR2 */
+		switch (smmu->input_size) {
+		case 32:
+			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+			break;
+		case 36:
+			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+			break;
+		case 39:
+			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+			break;
+		case 42:
+			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+			break;
+		case 44:
+			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+			break;
+		case 48:
+			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+			break;
+		}
+
+		switch (smmu->s1_output_size) {
+		case 32:
+			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 36:
+			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 39:
+			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 42:
+			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 44:
+			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
+			break;
+		case 48:
+			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
+			break;
+		}
+
+		if (stage1)
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+	}
+
+	/* TTBR0 */
+	reg = __pa(root_cfg->pgd);
+#ifndef __BIG_ENDIAN
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+#else
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+#endif
+
+	/*
+	 * TTBCR
+	 * We use long descriptors, with inner-shareable WBWA tables in TTBR0.
+	 */
+	if (smmu->version > 1) {
+		if (PAGE_SIZE == SZ_4K)
+			reg = TTBCR_TG0_4K;
+		else
+			reg = TTBCR_TG0_64K;
+
+		if (!stage1) {
+			switch (smmu->s2_output_size) {
+			case 32:
+				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 36:
+				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 40:
+				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 42:
+				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 44:
+				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+				break;
+			case 48:
+				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+				break;
+			}
+		} else {
+			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+		}
+	} else {
+		reg = 0;
+	}
+
+	reg |= TTBCR_EAE |
+	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
+	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+
+	/* MAIR0 (stage-1 only) */
+	if (stage1) {
+		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
+		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
+		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+	}
+
+	/* Nuke the TLB */
+	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+	arm_smmu_tlb_sync(smmu);
+
+	/* SCTLR */
+	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	if (stage1)
+		reg |= SCTLR_S1_ASIDPNE;
+#ifdef __BIG_ENDIAN
+	reg |= SCTLR_E;
+#endif
+	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+					struct device *dev)
+{
+	int irq, ret, start;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu, *parent;
+
+	/*
+	 * Walk the SMMU chain to find the root device for this chain.
+	 * We assume that no masters have translations which terminate
+	 * early, and therefore check that the root SMMU does indeed have
+	 * a StreamID for the master in question.
+	 */
+	parent = dev->archdata.iommu;
+	smmu_domain->output_mask = -1;
+	do {
+		smmu = parent;
+		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
+	} while ((parent = find_parent_smmu(smmu)));
+
+	if (!find_smmu_master(smmu, dev->of_node)) {
+		dev_err(dev, "unable to find root SMMU for device\n");
+		return -ENODEV;
+	}
+
+	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
+	if (IS_ERR_VALUE(ret))
+		return ret;
+
+	root_cfg->vmid = ret;
+	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+		/*
+		 * We will likely want to change this if/when KVM gets
+		 * involved.
+		 */
+		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
+		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
+		start = 0;
+	} else {
+		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+	}
+
+	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+				      smmu->num_context_banks);
+	if (IS_ERR_VALUE(ret))
+		goto out_free_vmid;
+
+	root_cfg->cbndx = ret;
+
+	if (smmu->version == 1) {
+		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+		root_cfg->irptndx %= smmu->num_context_irqs;
+	} else {
+		root_cfg->irptndx = root_cfg->cbndx;
+	}
+
+	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+			  "arm-smmu-context-fault", domain);
+	if (IS_ERR_VALUE(ret)) {
+		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+			root_cfg->irptndx, irq);
+		root_cfg->irptndx = -1;
+		goto out_free_context;
+	}
+
+	root_cfg->smmu = smmu;
+	arm_smmu_init_context_bank(smmu_domain);
+	return ret;
+
+out_free_context:
+	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+out_free_vmid:
+	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+	return ret;
+}
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	int irq;
+
+	if (!smmu)
+		return;
+
+	if (root_cfg->irptndx != -1) {
+		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+		free_irq(irq, domain);
+	}
+
+	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+}
+
+static int arm_smmu_domain_init(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain;
+	pgd_t *pgd;
+
+	/*
+	 * Allocate the domain and initialise some of its data structures.
+	 * We can't really do anything meaningful until we've added a
+	 * master.
+	 */
+	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+	if (!smmu_domain)
+		return -ENOMEM;
+
+	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!pgd)
+		goto out_free_domain;
+	smmu_domain->root_cfg.pgd = pgd;
+
+	spin_lock_init(&smmu_domain->lock);
+	domain->priv = smmu_domain;
+	return 0;
+
+out_free_domain:
+	kfree(smmu_domain);
+	return -ENOMEM;
+}
+
+static void arm_smmu_free_ptes(pmd_t *pmd)
+{
+	pgtable_t table = pmd_pgtable(*pmd);
+	pgtable_page_dtor(table);
+	__free_page(table);
+}
+
+static void arm_smmu_free_pmds(pud_t *pud)
+{
+	int i;
+	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+
+	pmd = pmd_base;
+	for (i = 0; i < PTRS_PER_PMD; ++i) {
+		if (pmd_none(*pmd))
+			continue;
+
+		arm_smmu_free_ptes(pmd);
+		pmd++;
+	}
+
+	pmd_free(NULL, pmd_base);
+}
+
+static void arm_smmu_free_puds(pgd_t *pgd)
+{
+	int i;
+	pud_t *pud, *pud_base = pud_offset(pgd, 0);
+
+	pud = pud_base;
+	for (i = 0; i < PTRS_PER_PUD; ++i) {
+		if (pud_none(*pud))
+			continue;
+
+		arm_smmu_free_pmds(pud);
+		pud++;
+	}
+
+	pud_free(NULL, pud_base);
+}
+
+static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
+{
+	int i;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	pgd_t *pgd, *pgd_base = root_cfg->pgd;
+
+	/*
+	 * Recursively free the page tables for this domain. We don't
+	 * care about speculative TLB filling, because the TLB will be
+	 * nuked next time this context bank is re-allocated and no devices
+	 * currently map to these tables.
+	 */
+	pgd = pgd_base;
+	for (i = 0; i < PTRS_PER_PGD; ++i) {
+		if (pgd_none(*pgd))
+			continue;
+		arm_smmu_free_puds(pgd);
+		pgd++;
+	}
+
+	kfree(pgd_base);
+}
+
+static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	arm_smmu_destroy_domain_context(domain);
+	arm_smmu_free_pgtables(smmu_domain);
+	kfree(smmu_domain);
+}
+
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+					  struct arm_smmu_master *master)
+{
+	int i;
+	struct arm_smmu_smr *smrs;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+		return 0;
+
+	if (master->smrs)
+		return -EEXIST;
+
+	smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
+	if (!smrs) {
+		dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
+			master->num_streamids, master->of_node->name);
+		return -ENOMEM;
+	}
+
+	/* Allocate the SMRs on the root SMMU */
+	for (i = 0; i < master->num_streamids; ++i) {
+		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+						  smmu->num_mapping_groups);
+		if (IS_ERR_VALUE(idx)) {
+			dev_err(smmu->dev, "failed to allocate free SMR\n");
+			goto err_free_smrs;
+		}
+
+		smrs[i] = (struct arm_smmu_smr) {
+			.idx	= idx,
+			.mask	= 0, /* We don't currently share SMRs */
+			.id	= master->streamids[i],
+		};
+	}
+
+	/* It worked! Now, poke the actual hardware */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+			  smrs[i].mask << SMR_MASK_SHIFT;
+		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+	}
+
+	master->smrs = smrs;
+	return 0;
+
+err_free_smrs:
+	while (--i >= 0)
+		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+	kfree(smrs);
+	return -ENOSPC;
+}
+
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+				      struct arm_smmu_master *master)
+{
+	int i;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_smr *smrs = master->smrs;
+
+	/* Invalidate the SMRs before freeing back to the allocator */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u8 idx = smrs[i].idx;
+		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+		__arm_smmu_free_bitmap(smmu->smr_map, idx);
+	}
+
+	master->smrs = NULL;
+	kfree(smrs);
+}
+
+static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
+					   struct arm_smmu_master *master)
+{
+	int i;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	for (i = 0; i < master->num_streamids; ++i) {
+		u16 sid = master->streamids[i];
+		writel_relaxed(S2CR_TYPE_BYPASS,
+			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
+	}
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+				      struct arm_smmu_master *master)
+{
+	int i, ret;
+	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	ret = arm_smmu_master_configure_smrs(smmu, master);
+	if (ret)
+		return ret;
+
+	/* Bypass the leaves */
+	smmu = smmu_domain->leaf_smmu;
+	while ((parent = find_parent_smmu(smmu))) {
+		/*
+		 * We won't have a StreamID match for anything but the root
+		 * smmu, so we only need to worry about StreamID indexing,
+		 * where we must install bypass entries in the S2CRs.
+		 */
+		if (smmu->features & ARM_SMMU_FEAT_STREAM_MATCH)
+			continue;
+
+		arm_smmu_bypass_stream_mapping(smmu, master);
+		smmu = parent;
+	}
+
+	/* Now we're at the root, time to point at our context bank */
+	for (i = 0; i < master->num_streamids; ++i) {
+		u32 idx, s2cr;
+		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
+		s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
+		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+	}
+
+	return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+					  struct arm_smmu_master *master)
+{
+	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;
+
+	/*
+	 * We *must* clear the S2CR first, because freeing the SMR means
+	 * that it can be re-allocated immediately.
+	 */
+	arm_smmu_bypass_stream_mapping(smmu, master);
+	arm_smmu_master_free_smrs(smmu, master);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	int ret = -EINVAL;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_device *device_smmu = dev->archdata.iommu;
+	struct arm_smmu_master *master;
+
+	if (!device_smmu) {
+		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * Sanity check the domain. We don't currently support domains
+	 * that cross between different SMMU chains.
+	 */
+	spin_lock(&smmu_domain->lock);
+	if (!smmu_domain->leaf_smmu) {
+		/* Now that we have a master, we can finalise the domain */
+		ret = arm_smmu_init_domain_context(domain, dev);
+		if (IS_ERR_VALUE(ret))
+			goto err_unlock;
+
+		smmu_domain->leaf_smmu = device_smmu;
+	} else if (smmu_domain->leaf_smmu != device_smmu) {
+		dev_err(dev,
+			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+			dev_name(smmu_domain->leaf_smmu->dev),
+			dev_name(device_smmu->dev));
+		goto err_unlock;
+	}
+	spin_unlock(&smmu_domain->lock);
+
+	/* Looks ok, so add the device to the domain */
+	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	return arm_smmu_domain_add_master(smmu_domain, master);
+
+err_unlock:
+	spin_unlock(&smmu_domain->lock);
+	return ret;
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_master *master;
+
+	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+	if (master)
+		arm_smmu_domain_remove_master(smmu_domain, master);
+}
+
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+				   size_t size)
+{
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+	/*
+	 * If the SMMU can't walk tables in the CPU caches, treat them
+	 * like non-coherent DMA since we need to flush the new entries
+	 * all the way out to memory. There's no possibility of recursion
+	 * here as the SMMU table walker will not be wired through another
+	 * SMMU.
+	 */
+	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
+		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+			     DMA_TO_DEVICE);
+}
+
+static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
+					     unsigned long end)
+{
+	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
+		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
+}
+
+static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
+				   unsigned long addr, unsigned long end,
+				   unsigned long pfn, int flags, int stage)
+{
+	pte_t *pte, *start;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+
+	if (pmd_none(*pmd)) {
+		/* Allocate a new set of tables */
+		pgtable_t table = alloc_page(PGALLOC_GFP);
+		if (!table)
+			return -ENOMEM;
+
+		arm_smmu_flush_pgtable(smmu, page_address(table),
+				       ARM_SMMU_PTE_HWTABLE_SIZE);
+		pgtable_page_ctor(table);
+		pmd_populate(NULL, pmd, table);
+		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
+	}
+
+	if (stage == 1) {
+		pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+			pteval |= ARM_SMMU_PTE_AP_RDONLY;
+
+		if (flags & IOMMU_CACHE)
+			pteval |= (MAIR_ATTR_IDX_CACHE <<
+				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
+	} else {
+		pteval |= ARM_SMMU_PTE_HAP_FAULT;
+		if (flags & IOMMU_READ)
+			pteval |= ARM_SMMU_PTE_HAP_READ;
+		if (flags & IOMMU_WRITE)
+			pteval |= ARM_SMMU_PTE_HAP_WRITE;
+		if (flags & IOMMU_CACHE)
+			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
+		else
+			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
+	}
+
+	/* If no access, create a faulting entry to avoid TLB fills */
+	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+		pteval &= ~ARM_SMMU_PTE_PAGE;
+
+	pteval |= ARM_SMMU_PTE_SH_IS;
+	start = pmd_page_vaddr(*pmd) + pte_index(addr);
+	pte = start;
+
+	/*
+	 * Install the page table entries. This is fairly complicated
+	 * since we attempt to make use of the contiguous hint in the
+	 * ptes where possible. The contiguous hint indicates a series
+	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
+	 * contiguous region with the following constraints:
+	 *
+	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
+	 *   - Each pte in the region has the contiguous hint bit set
+	 *
+	 * This complicates unmapping (also handled by this code, when
+	 * neither IOMMU_READ nor IOMMU_WRITE is set) because it is
+	 * possible, yet highly unlikely, that a client may unmap only
+	 * part of a contiguous range. This requires clearing of the
+	 * contiguous hint bits in the range before installing the new
+	 * faulting entries.
+	 *
+	 * Note that re-mapping an address range without first unmapping
+	 * it is not supported, so TLB invalidation is not required here
+	 * and is instead performed at unmap and domain-init time.
+	 */
+	do {
+		int i = 1;
+		pteval &= ~ARM_SMMU_PTE_CONT;
+
+		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
+			i = ARM_SMMU_PTE_CONT_ENTRIES;
+			pteval |= ARM_SMMU_PTE_CONT;
+		} else if (pte_val(*pte) &
+			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
+			int j;
+			pte_t *cont_start;
+			unsigned long idx = pte_index(addr);
+
+			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
+			cont_start = pmd_page_vaddr(*pmd) + idx;
+			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
+				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;
+
+			arm_smmu_flush_pgtable(smmu, cont_start,
+					       sizeof(*pte) *
+					       ARM_SMMU_PTE_CONT_ENTRIES);
+		}
+
+		do {
+			*pte = pfn_pte(pfn, __pgprot(pteval));
+		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
+	} while (addr != end);
+
+	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
+	return 0;
+}
+
+static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+				   unsigned long addr, unsigned long end,
+				   phys_addr_t phys, int flags, int stage)
+{
+	int ret;
+	pmd_t *pmd;
+	unsigned long next, pfn = __phys_to_pfn(phys);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	if (pud_none(*pud)) {
+		pmd = pmd_alloc_one(NULL, addr);
+		if (!pmd)
+			return -ENOMEM;
+	} else
+#endif
+		pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+					      flags, stage);
+		pud_populate(NULL, pud, pmd);
+		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+		phys += next - addr;
+	} while (pmd++, addr = next, addr < end);
+
+	return ret;
+}
+
+static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+				   unsigned long addr, unsigned long end,
+				   phys_addr_t phys, int flags, int stage)
+{
+	int ret = 0;
+	pud_t *pud;
+	unsigned long next;
+
+#ifndef __PAGETABLE_PUD_FOLDED
+	if (pgd_none(*pgd)) {
+		pud = pud_alloc_one(NULL, addr);
+		if (!pud)
+			return -ENOMEM;
+	} else
+#endif
+		pud = pud_offset(pgd, addr);
+
+	do {
+		next = pud_addr_end(addr, end);
+		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+					      flags, stage);
+		pgd_populate(NULL, pud, pgd);
+		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+		phys += next - addr;
+	} while (pud++, addr = next, addr < end);
+
+	return ret;
+}
+
+static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+				   unsigned long iova, phys_addr_t paddr,
+				   size_t size, int flags)
+{
+	int ret, stage;
+	unsigned long end;
+	phys_addr_t input_mask, output_mask;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	pgd_t *pgd = root_cfg->pgd;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+
+	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
+		stage = 2;
+		output_mask = (1ULL << smmu->s2_output_size) - 1;
+	} else {
+		stage = 1;
+		output_mask = (1ULL << smmu->s1_output_size) - 1;
+	}
+
+	if (!pgd)
+		return -EINVAL;
+
+	if (size & ~PAGE_MASK)
+		return -EINVAL;
+
+	input_mask = (1ULL << smmu->input_size) - 1;
+	if ((phys_addr_t)iova & ~input_mask)
+		return -ERANGE;
+
+	if (paddr & ~output_mask)
+		return -ERANGE;
+
+	spin_lock(&smmu_domain->lock);
+	pgd += pgd_index(iova);
+	end = iova + size;
+	do {
+		unsigned long next = pgd_addr_end(iova, end);
+
+		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
+					      flags, stage);
+		if (ret)
+			goto out_unlock;
+
+		paddr += next - iova;
+		iova = next;
+	} while (pgd++, iova != end);
+
+out_unlock:
+	spin_unlock(&smmu_domain->lock);
+
+	/* Ensure new page tables are visible to the hardware walker */
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		dsb();
+
+	return ret;
+}
+
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+			phys_addr_t paddr, size_t size, int flags)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
+
+	if (!smmu_domain || !smmu)
+		return -ENODEV;
+
+	/* Check for silent address truncation up the SMMU chain. */
+	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
+		return -ERANGE;
+
+	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+}
+
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			     size_t size)
+{
+	int ret;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
+	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+	arm_smmu_tlb_sync(smmu);
+	return ret ? ret : size;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+					 dma_addr_t iova)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+	struct arm_smmu_device *smmu = root_cfg->smmu;
+
+	spin_lock(&smmu_domain->lock);
+	pgd = root_cfg->pgd;
+	if (!pgd)
+		goto err_unlock;
+
+	pgd += pgd_index(iova);
+	if (pgd_none_or_clear_bad(pgd))
+		goto err_unlock;
+
+	pud = pud_offset(pgd, iova);
+	if (pud_none_or_clear_bad(pud))
+		goto err_unlock;
+
+	pmd = pmd_offset(pud, iova);
+	if (pmd_none_or_clear_bad(pmd))
+		goto err_unlock;
+
+	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	if (pte_none(pte))
+		goto err_unlock;
+
+	spin_unlock(&smmu_domain->lock);
+	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+
+err_unlock:
+	spin_unlock(&smmu_domain->lock);
+	dev_warn(smmu->dev,
+		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+		 (unsigned long long)iova);
+	return -EINVAL;
+}
+
+static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
+				   unsigned long cap)
+{
+	unsigned long caps = 0;
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		caps |= IOMMU_CAP_CACHE_COHERENCY;
+
+	return !!(cap & caps);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+	struct arm_smmu_device *child, *parent, *smmu;
+	struct arm_smmu_master *master = NULL;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(parent, &arm_smmu_devices, list) {
+		smmu = parent;
+
+		/* Try to find a child of the current SMMU. */
+		list_for_each_entry(child, &arm_smmu_devices, list) {
+			if (child->parent_of_node == parent->dev->of_node) {
+				/* Does the child sit above our master? */
+				master = find_smmu_master(child, dev->of_node);
+				if (master) {
+					smmu = NULL;
+					break;
+				}
+			}
+		}
+
+		/* We found some children, so keep searching. */
+		if (!smmu) {
+			master = NULL;
+			continue;
+		}
+
+		master = find_smmu_master(smmu, dev->of_node);
+		if (master)
+			break;
+	}
+	spin_unlock(&arm_smmu_devices_lock);
+
+	if (!master)
+		return -ENODEV;
+
+	dev->archdata.iommu = smmu;
+	return 0;
+}
+
+static void arm_smmu_remove_device(struct device *dev)
+{
+	dev->archdata.iommu = NULL;
+}
+
+static struct iommu_ops arm_smmu_ops = {
+	.domain_init	= arm_smmu_domain_init,
+	.domain_destroy	= arm_smmu_domain_destroy,
+	.attach_dev	= arm_smmu_attach_dev,
+	.detach_dev	= arm_smmu_detach_dev,
+	.map		= arm_smmu_map,
+	.unmap		= arm_smmu_unmap,
+	.iova_to_phys	= arm_smmu_iova_to_phys,
+	.domain_has_cap	= arm_smmu_domain_has_cap,
+	.add_device	= arm_smmu_add_device,
+	.remove_device	= arm_smmu_remove_device,
+	.pgsize_bitmap	= (SECTION_SIZE |
+			   ARM_SMMU_PTE_CONT_SIZE |
+			   PAGE_SIZE),
+};
+
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	int i = 0;
+	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
+	/* Mark all SMRn as invalid and all S2CRn as bypass */
+	for (i = 0; i < smmu->num_mapping_groups; ++i) {
+		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
+	}
+
+	/* Invalidate the TLB, just in case */
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+
+	/* Enable fault reporting */
+	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+
+	/* Disable TLB broadcasting. */
+	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+
+	/* Enable client access, but bypass when no mapping is found */
+	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+
+	/* Disable forced broadcasting */
+	scr0 &= ~sCR0_FB;
+
+	/* Don't upgrade barriers */
+	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+
+	/* Push the button */
+	arm_smmu_tlb_sync(smmu);
+	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+}
+
+static int arm_smmu_id_size_to_bits(int size)
+{
+	switch (size) {
+	case 0:
+		return 32;
+	case 1:
+		return 36;
+	case 2:
+		return 40;
+	case 3:
+		return 42;
+	case 4:
+		return 44;
+	case 5:
+	default:
+		return 48;
+	}
+}
+
+static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+{
+	unsigned long size;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	u32 id;
+
+	dev_notice(smmu->dev, "probing hardware configuration...\n");
+
+	/* Primecell ID */
+	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
+	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
+	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+
+	/* ID0 */
+	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+#ifndef CONFIG_64BIT
+	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
+		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
+		return -ENODEV;
+	}
+#endif
+	if (id & ID0_S1TS) {
+		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+		dev_notice(smmu->dev, "\tstage 1 translation\n");
+	}
+
+	if (id & ID0_S2TS) {
+		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+		dev_notice(smmu->dev, "\tstage 2 translation\n");
+	}
+
+	if (id & ID0_NTS) {
+		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
+		dev_notice(smmu->dev, "\tnested translation\n");
+	}
+
+	if (!(smmu->features &
+		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
+		 ARM_SMMU_FEAT_TRANS_NESTED))) {
+		dev_err(smmu->dev, "\tno translation support!\n");
+		return -ENODEV;
+	}
+
+	if (id & ID0_CTTW) {
+		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+		dev_notice(smmu->dev, "\tcoherent table walk\n");
+	}
+
+	if (id & ID0_SMS) {
+		u32 smr, sid, mask;
+
+		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
+		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
+					   ID0_NUMSMRG_MASK;
+		if (smmu->num_mapping_groups == 0) {
+			dev_err(smmu->dev,
+				"stream-matching supported, but no SMRs present!\n");
+			return -ENODEV;
+		}
+
+		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+
+		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+		if ((mask & sid) != sid) {
+			dev_err(smmu->dev,
+				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+				mask, sid);
+			return -ENODEV;
+		}
+
+		dev_notice(smmu->dev,
+			   "\tstream matching with %u register groups, mask 0x%x",
+			   smmu->num_mapping_groups, mask);
+	}
+
+	/* ID1 */
+	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+
+	/* Check that we ioremapped enough */
+	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+	size *= (smmu->pagesize << 1);
+	if (smmu->size < size)
+		dev_warn(smmu->dev,
+			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
+			 size, smmu->size);
+
+	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
+				      ID1_NUMS2CB_MASK;
+	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+		return -ENODEV;
+	}
+	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+		   smmu->num_context_banks, smmu->num_s2_context_banks);
+
+	/* ID2 */
+	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+
+	/*
+	 * Stage-1 output limited by stage-2 input size due to pgd
+	 * allocation (PTRS_PER_PGD).
+	 */
+#ifdef CONFIG_64BIT
+	/* Current maximum output size of 39 bits */
+	smmu->s1_output_size = min(39UL, size);
+#else
+	smmu->s1_output_size = min(32UL, size);
+#endif
+
+	/* The stage-2 output mask is also applied for bypass */
+	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);
+
+	if (smmu->version == 1) {
+		smmu->input_size = 32;
+	} else {
+#ifdef CONFIG_64BIT
+		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+		size = min(39, arm_smmu_id_size_to_bits(size));
+#else
+		size = 32;
+#endif
+		smmu->input_size = size;
+
+		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
+		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
+		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
+			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
+				PAGE_SIZE);
+			return -ENODEV;
+		}
+	}
+
+	dev_notice(smmu->dev,
+		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
+		   smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
+	return 0;
+}
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct arm_smmu_device *smmu;
+	struct device_node *dev_node;
+	struct device *dev = &pdev->dev;
+	struct rb_node *node;
+	struct of_phandle_args masterspec;
+	int num_irqs, i, err;
+
+	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+	if (!smmu) {
+		dev_err(dev, "failed to allocate arm_smmu_device\n");
+		return -ENOMEM;
+	}
+	smmu->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "missing base address/size\n");
+		return -ENODEV;
+	}
+
+	smmu->size = resource_size(res);
+	smmu->base = devm_request_and_ioremap(dev, res);
+	if (!smmu->base)
+		return -EADDRNOTAVAIL;
+
+	if (of_property_read_u32(dev->of_node, "#global-interrupts",
+				 &smmu->num_global_irqs)) {
+		dev_err(dev, "missing #global-interrupts property\n");
+		return -ENODEV;
+	}
+
+	num_irqs = 0;
+	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
+		num_irqs++;
+		if (num_irqs > smmu->num_global_irqs)
+			smmu->num_context_irqs++;
+	}
+
+	if (num_irqs < smmu->num_global_irqs) {
+		dev_warn(dev, "found %d interrupts but expected at least %d\n",
+			 num_irqs, smmu->num_global_irqs);
+		smmu->num_global_irqs = num_irqs;
+	}
+	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
+
+	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
+				  GFP_KERNEL);
+	if (!smmu->irqs) {
+		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_irqs; ++i) {
+		int irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			dev_err(dev, "failed to get irq index %d\n", i);
+			return -ENODEV;
+		}
+		smmu->irqs[i] = irq;
+	}
+
+	i = 0;
+	smmu->masters = RB_ROOT;
+	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
+					   "#stream-id-cells", i,
+					   &masterspec)) {
+		err = register_smmu_master(smmu, dev, &masterspec);
+		if (err) {
+			dev_err(dev, "failed to add master %s\n",
+				masterspec.np->name);
+			goto out_put_masters;
+		}
+
+		i++;
+	}
+	dev_notice(dev, "registered %d master devices\n", i);
+
+	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
+		smmu->parent_of_node = dev_node;
+
+	err = arm_smmu_device_cfg_probe(smmu);
+	if (err)
+		goto out_put_parent;
+
+	if (smmu->version > 1 &&
+	    smmu->num_context_banks != smmu->num_context_irqs) {
+		dev_err(dev,
+			"found only %d context interrupt(s) but %d required\n",
+			smmu->num_context_irqs, smmu->num_context_banks);
+		err = -ENODEV;
+		goto out_put_parent;
+	}
+
+	arm_smmu_device_reset(smmu);
+
+	for (i = 0; i < smmu->num_global_irqs; ++i) {
+		err = request_irq(smmu->irqs[i],
+				  arm_smmu_global_fault,
+				  IRQF_SHARED,
+				  "arm-smmu global fault",
+				  smmu);
+		if (err) {
+			dev_err(dev, "failed to request global IRQ %d (%u)\n",
+				i, smmu->irqs[i]);
+			goto out_free_irqs;
+		}
+	}
+
+	INIT_LIST_HEAD(&smmu->list);
+	spin_lock(&arm_smmu_devices_lock);
+	list_add(&smmu->list, &arm_smmu_devices);
+	spin_unlock(&arm_smmu_devices_lock);
+	return 0;
+
+out_free_irqs:
+	while (i--)
+		free_irq(smmu->irqs[i], smmu);
+
+out_put_parent:
+	if (smmu->parent_of_node)
+		of_node_put(smmu->parent_of_node);
+
+out_put_masters:
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
+	return err;
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+	int i;
+	struct device *dev = &pdev->dev;
+	struct arm_smmu_device *curr, *smmu = NULL;
+	struct rb_node *node;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(curr, &arm_smmu_devices, list) {
+		if (curr->dev == dev) {
+			smmu = curr;
+			list_del(&smmu->list);
+			break;
+		}
+	}
+	spin_unlock(&arm_smmu_devices_lock);
+
+	if (!smmu)
+		return -ENODEV;
+
+	if (smmu->parent_of_node)
+		of_node_put(smmu->parent_of_node);
+
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master;
+		master = container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
+	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+		dev_err(dev, "removing device with active domains!\n");
+
+	for (i = 0; i < smmu->num_global_irqs; ++i)
+		free_irq(smmu->irqs[i], smmu);
+
+	/* Turn the thing off */
+	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id arm_smmu_of_match[] = {
+	{ .compatible = "arm,smmu-v1", },
+	{ .compatible = "arm,smmu-v2", },
+	{ .compatible = "arm,mmu-400", },
+	{ .compatible = "arm,mmu-500", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+#endif
+
+static struct platform_driver arm_smmu_driver = {
+	.driver	= {
+		.owner		= THIS_MODULE,
+		.name		= "arm-smmu",
+		.of_match_table	= of_match_ptr(arm_smmu_of_match),
+	},
+	.probe	= arm_smmu_device_dt_probe,
+	.remove	= arm_smmu_device_remove,
+};
+
+static int __init arm_smmu_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&arm_smmu_driver);
+	if (ret)
+		return ret;
+
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+
+	return 0;
+}
+
+static void __exit arm_smmu_exit(void)
+{
+	return platform_driver_unregister(&arm_smmu_driver);
+}
+
+module_init(arm_smmu_init);
+module_exit(arm_smmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");

+ 45 - 41
drivers/iommu/iommu.c

@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+			   unsigned long addr_merge, size_t size)
+{
+	unsigned int pgsize_idx;
+	size_t pgsize;
+
+	/* Max page size that still fits into 'size' */
+	pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements ? */
+	if (likely(addr_merge)) {
+		/* Max page size allowed by address */
+		unsigned int align_pgsize_idx = __ffs(addr_merge);
+		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+	}
+
+	/* build a mask of acceptable page sizes */
+	pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+	/* throw away page sizes not supported by the hardware */
+	pgsize &= domain->ops->pgsize_bitmap;
+
+	/* make sure we're still sane */
+	BUG_ON(!pgsize);
+
+	/* pick the biggest page */
+	pgsize_idx = __fls(pgsize);
+	pgsize = 1UL << pgsize_idx;
+
+	return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, size_t size, int prot)
 {
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-			"0x%x\n", iova, (unsigned long)paddr,
-			(unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-				(unsigned long)paddr, (unsigned long)size);
+	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		unsigned long pgsize, addr_merge = iova | paddr;
-		unsigned int pgsize_idx;
-
-		/* Max page size that still fits into 'size' */
-		pgsize_idx = __fls(size);
-
-		/* need to consider alignment requirements ? */
-		if (likely(addr_merge)) {
-			/* Max page size allowed by both iova and paddr */
-			unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-		}
-
-		/* build a mask of acceptable page sizes */
-		pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-		/* throw away page sizes not supported by the hardware */
-		pgsize &= domain->ops->pgsize_bitmap;
-
-		/* make sure we're still sane */
-		BUG_ON(!pgsize);
-
-		/* pick the biggest page */
-		pgsize_idx = __fls(pgsize);
-		pgsize = 1UL << pgsize_idx;
+		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-					(unsigned long)paddr, pgsize);
+		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
 		if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	 * by the hardware
 	 */
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-					iova, (unsigned long)size, min_pagesz);
+		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+		       iova, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-							(unsigned long)size);
+	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
 	/*
 	 * Keep iterating until we either unmap 'size' bytes (or more)
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t left = size - unmapped;
+		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = domain->ops->unmap(domain, iova, left);
+		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
 
-		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-					(unsigned long)unmapped_page);
+		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+			 iova, unmapped_page);
 
 		iova += unmapped_page;
 		unmapped += unmapped_page;
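
The newly factored-out iommu_pgsize() picks the largest page that fits in the remaining size, respects the alignment of the addresses being merged, and is advertised in the driver's pgsize_bitmap. A userspace model of the same arithmetic, with GCC builtins standing in for the kernel's __fls()/__ffs() and an assumed bitmap of 4K/64K/1M pages:

#include <stdio.h>

/* Userspace stand-ins for the kernel's __fls()/__ffs(). */
static unsigned int my_fls(unsigned long x)
{
	return sizeof(long) * 8 - 1 - __builtin_clzl(x);
}

static unsigned int my_ffs(unsigned long x)
{
	return __builtin_ctzl(x);
}

/* Same arithmetic as the iommu_pgsize() helper added above. */
static unsigned long pick_pgsize(unsigned long addr_merge, unsigned long size,
				 unsigned long pgsize_bitmap)
{
	unsigned int pgsize_idx = my_fls(size);
	unsigned long pgsize;

	if (addr_merge) {
		unsigned int align_idx = my_ffs(addr_merge);

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	pgsize = (1UL << (pgsize_idx + 1)) - 1;	/* every size that fits */
	pgsize &= pgsize_bitmap;		/* ...that hardware supports */
	return 1UL << my_fls(pgsize);		/* kernel BUG_ON()s if zero */
}

int main(void)
{
	/* Assumed bitmap: 4K, 64K and 1M pages. */
	unsigned long bitmap = (1UL << 12) | (1UL << 16) | (1UL << 20);

	/* 2M span, everything 1M-aligned: a 1M page is chosen. */
	printf("0x%lx\n", pick_pgsize(0x100000UL | 0x300000UL, 0x200000, bitmap));

	/* Same span, only 64K alignment: fall back to a 64K page. */
	printf("0x%lx\n", pick_pgsize(0x110000UL | 0x310000UL, 0x200000, bitmap));
	return 0;
}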

+ 8 - 7
drivers/iommu/omap-iommu.c

@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 	iopgd = iopgd_offset(obj, da);
 
 	if (!iopgd_is_table(*iopgd)) {
-		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
-			"*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
+				obj->name, errs, da, iopgd, *iopgd);
 		return IRQ_NONE;
 	}
 
 	iopte = iopte_offset(iopgd, da);
 
-	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
-		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
-		iopte, *iopte);
+	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
+			obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
 	return IRQ_NONE;
 }
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
 		else if (iopte_is_large(*pte))
 			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
 		else
-			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
+			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
+							(unsigned long long)da);
 	} else {
 		if (iopgd_is_section(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
 		else if (iopgd_is_super(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
 		else
-			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
+			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
+							(unsigned long long)da);
 	}
 
 	return ret;

+ 1 - 1
drivers/iommu/omap-iopgtable.h

@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)							\
-	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
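
The one-line change above wraps the cast in parentheses because '->' binds more tightly than a cast: without the outer parentheses, to_iommu(dev)->name would apply '->' to the void pointer returned by platform_get_drvdata() before the cast happens. A minimal illustration of the pitfall, with made-up names:

#include <stdio.h>

struct widget { int id; };

static void *get_drvdata(void)
{
	static struct widget w = { .id = 42 };
	return &w;
}

/* Without outer parentheses, BAD()->id parses as a cast of (...->id). */
#define BAD()	(struct widget *)get_drvdata()
#define GOOD()	((struct widget *)get_drvdata())

int main(void)
{
	/* printf("%d\n", BAD()->id);  -- rejected: '->' applied to void * */
	printf("%d\n", GOOD()->id);	/* prints 42 */
	return 0;
}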

+ 2 - 2
drivers/iommu/omap-iovmm.c

@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt)
 		}
 
 		if (i && sg->offset) {
-			pr_err("%s: sg[%d] offset not allowed in internal "
-					"entries\n", __func__, i);
+			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+				__func__, i);
 			return 0;
 		}
 

+ 1 - 1
drivers/irqchip/irq-gic.c

@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	if (action == CPU_STARTING)
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		gic_cpu_init(&gic_data[0]);
 	return NOTIFY_OK;
 }

+ 9 - 3
drivers/media/Kconfig

@@ -136,9 +136,9 @@ config DVB_NET
 
 # This Kconfig option is used by both PCI and USB drivers
 config TTPCI_EEPROM
-        tristate
-        depends on I2C
-        default n
+	tristate
+	depends on I2C
+	default n
 
 source "drivers/media/dvb-core/Kconfig"
 
@@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT
 
 	  If unsure say Y.
 
+config MEDIA_ATTACH
+	bool
+	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+	depends on MODULES
+	default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"

+ 1 - 1
drivers/media/i2c/s5c73m3/s5c73m3-core.c

@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
 
 	if (fie->pad != OIF_SOURCE_PAD)
 		return -EINVAL;
-	if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+	if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
 		return -EINVAL;
 
 	mutex_lock(&state->lock);
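
This hunk, like the fimc-is-regs.c one further down, fixes a classic off-by-one: an index equal to ARRAY_SIZE() is already one slot past the last element, so the bounds check must reject it with '>=' rather than '>'. A small demonstration:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int intervals[4] = { 10, 15, 25, 30 };
	unsigned int index = 4;			/* one past the end */

	if (index > ARRAY_SIZE(intervals))	/* 4 > 4: false, not caught */
		puts("rejected by '>'");

	if (index >= ARRAY_SIZE(intervals))	/* 4 >= 4: true, caught */
		puts("rejected by '>='");
	else
		printf("%d\n", intervals[index]); /* would read out of bounds */

	return 0;
}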

+ 3 - 4
drivers/media/pci/cx88/cx88-alsa.c

@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
 	int changed = 0;
 	u32 old;
 
-	if (core->board.audio_chip == V4L2_IDENT_WM8775)
+	if (core->sd_wm8775)
 		snd_cx88_wm8775_volume_put(kcontrol, value);
 
 	left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
 		vol ^= bit;
 		cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
 		/* Pass mute onto any WM8775 */
-		if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
-		    ((1<<6) == bit))
+		if (core->sd_wm8775 && ((1<<6) == bit))
 			wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
 		ret = 1;
 	}
@@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
 		goto error;
 
 	/* If there's a wm8775 then add a Line-In ALC switch */
-	if (core->board.audio_chip == V4L2_IDENT_WM8775)
+	if (core->sd_wm8775)
 		snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
 	strcpy (card->driver, "CX88x");

+ 3 - 5
drivers/media/pci/cx88/cx88-video.c

@@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
 		/* The wm8775 module has the "2" route hardwired into
 		   the initialization. Some boards may use different
 		   routes for different inputs. HVR-1300 surely does */
-		if (core->board.audio_chip &&
-		    core->board.audio_chip == V4L2_IDENT_WM8775) {
+		if (core->sd_wm8775) {
 			call_all(core, audio, s_routing,
 				 INPUT(input).audioroute, 0, 0);
 		}
@@ -771,8 +770,7 @@ static int video_open(struct file *file)
 		cx_write(MO_GP1_IO, core->board.radio.gpio1);
 		cx_write(MO_GP2_IO, core->board.radio.gpio2);
 		if (core->board.radio.audioroute) {
-			if(core->board.audio_chip &&
-				core->board.audio_chip == V4L2_IDENT_WM8775) {
+			if (core->sd_wm8775) {
 				call_all(core, audio, s_routing,
 					core->board.radio.audioroute, 0, 0);
 			}
@@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 	u32 value,mask;
 
 	/* Pass changes onto any WM8775 */
-	if (core->board.audio_chip == V4L2_IDENT_WM8775) {
+	if (core->sd_wm8775) {
 		switch (ctrl->id) {
 		case V4L2_CID_AUDIO_MUTE:
 			wm8775_s_ctrl(core, ctrl->id, ctrl->val);

+ 9 - 0
drivers/media/platform/coda.c

@@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+			      struct v4l2_create_buffers *create)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
 			   enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
 	.vidioc_qbuf		= vidioc_qbuf,
 	.vidioc_dqbuf		= vidioc_dqbuf,
+	.vidioc_create_bufs	= vidioc_create_bufs,
 
 	.vidioc_streamon	= vidioc_streamon,
 	.vidioc_streamoff	= vidioc_streamoff,

+ 15 - 0
drivers/media/platform/davinci/vpbe_display.c

@@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
 	other video window */
 
 	layer->pix_fmt = *pixfmt;
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+		struct vpbe_layer *otherlayer;
+
+		otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+		/* if other layer is available, only
+		 * claim it, do not configure it
+		 */
+		ret = osd_device->ops.request_layer(osd_device,
+						    otherlayer->layer_info.id);
+		if (ret < 0) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				 "Display Manager failed to allocate layer\n");
+			return -EBUSY;
+		}
+	}
 
 	/* Get osd layer config */
 	osd_device->ops.get_layer_config(osd_device,

+ 1 - 2
drivers/media/platform/davinci/vpfe_capture.c

@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
 	if (NULL == ccdc_cfg) {
 		v4l2_err(pdev->dev.driver,
 			 "Memory allocation failed for ccdc_cfg\n");
-		goto probe_free_lock;
+		goto probe_free_dev_mem;
 	}
 
 	mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@ probe_out_release_irq:
 	free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
 	kfree(ccdc_cfg);
-probe_free_lock:
 	mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
 	kfree(vpfe_dev);

+ 1 - 1
drivers/media/platform/exynos4-is/fimc-is-regs.c

@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
 		HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
 	};
 
-	if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+	if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
 		return -EINVAL;
 
 	mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));

+ 18 - 30
drivers/media/platform/exynos4-is/fimc-is.c

@@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
 	[ISS_CLK_LITE0]			= "lite0",
 	[ISS_CLK_LITE1]			= "lite1",
 	[ISS_CLK_MPLL]			= "mpll",
-	[ISS_CLK_SYSREG]		= "sysreg",
 	[ISS_CLK_ISP]			= "isp",
 	[ISS_CLK_DRC]			= "drc",
 	[ISS_CLK_FD]			= "fd",
@@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is)
 	for (i = 0; i < ISS_CLKS_MAX; i++) {
 		if (IS_ERR(is->clocks[i]))
 			continue;
-		clk_unprepare(is->clocks[i]);
 		clk_put(is->clocks[i]);
 		is->clocks[i] = ERR_PTR(-EINVAL);
 	}
@@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is)
 			ret = PTR_ERR(is->clocks[i]);
 			goto err;
 		}
-		ret = clk_prepare(is->clocks[i]);
-		if (ret < 0) {
-			clk_put(is->clocks[i]);
-			is->clocks[i] = ERR_PTR(-EINVAL);
-			goto err;
-		}
 	}
 
 	return 0;
@@ -103,7 +95,7 @@ err:
 	fimc_is_put_clocks(is);
 	dev_err(&is->pdev->dev, "failed to get clock: %s\n",
 		fimc_is_clocks[i]);
-	return -ENXIO;
+	return ret;
 }
 
 static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
 	for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 		if (IS_ERR(is->clocks[i]))
 			continue;
-		ret = clk_enable(is->clocks[i]);
+		ret = clk_prepare_enable(is->clocks[i]);
 		if (ret < 0) {
 			dev_err(&is->pdev->dev, "clock %s enable failed\n",
 				fimc_is_clocks[i]);
@@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
 
 	for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 		if (!IS_ERR(is->clocks[i])) {
-			clk_disable(is->clocks[i]);
+			clk_disable_unprepare(is->clocks[i]);
 			pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
 		}
 	}
@@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is)
 	struct device *dev = &is->pdev->dev;
 	int ret;
 
+	if (is->fw.f_w == NULL) {
+		dev_err(dev, "firmware is not loaded\n");
+		return -EINVAL;
+	}
+
 	memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
 	wmb();
 
@@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev)
 		goto err_clk;
 	}
 	pm_runtime_enable(dev);
-	/*
-	 * Enable only the ISP power domain, keep FIMC-IS clocks off until
-	 * the whole clock tree is configured. The ISP power domain needs
-	 * be active in order to acces any CMU_ISP clock registers.
-	 */
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0)
-		goto err_irq;
-
-	ret = fimc_is_setup_clocks(is);
-	pm_runtime_put_sync(dev);
 
+	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		goto err_irq;
 
-	is->clk_init = true;
-
 	is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
 	if (IS_ERR(is->alloc_ctx)) {
 		ret = PTR_ERR(is->alloc_ctx);
@@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto err_dfs;
 
+	pm_runtime_put_sync(dev);
+
 	dev_dbg(dev, "FIMC-IS registered successfully\n");
 	return 0;
 
@@ -894,9 +881,11 @@ err_clk:
 static int fimc_is_runtime_resume(struct device *dev)
 {
 	struct fimc_is *is = dev_get_drvdata(dev);
+	int ret;
 
-	if (!is->clk_init)
-		return 0;
+	ret = fimc_is_setup_clocks(is);
+	if (ret)
+		return ret;
 
 	return fimc_is_enable_clocks(is);
 }
@@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev)
 {
 	struct fimc_is *is = dev_get_drvdata(dev);
 
-	if (is->clk_init)
-		fimc_is_disable_clocks(is);
-
+	fimc_is_disable_clocks(is);
 	return 0;
 }
 
@@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev)
 	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
 	fimc_is_put_clocks(is);
 	fimc_is_debugfs_remove(is);
-	release_firmware(is->fw.f_w);
+	if (is->fw.f_w)
+		release_firmware(is->fw.f_w);
 	fimc_is_free_cpu_memory(is);
 
 	return 0;
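
The fimc-is changes above drop the separate clk_prepare() at probe time and switch the runtime-PM paths to clk_prepare_enable()/clk_disable_unprepare(), keeping the prepare and enable counts balanced however often the device is powered up and down. A minimal sketch of the pattern, with made-up driver names (not code from this patch):

#include <linux/clk.h>
#include <linux/device.h>

/* example_* names are hypothetical; only the clk calls mirror the patch. */
static int example_power_on(struct device *dev, struct clk *clk)
{
	/* clk_prepare_enable() = clk_prepare() + clk_enable(); may sleep */
	int ret = clk_prepare_enable(clk);

	if (ret < 0)
		dev_err(dev, "failed to enable clock: %d\n", ret);
	return ret;
}

static void example_power_off(struct clk *clk)
{
	/* undoes both the enable and the prepare taken above */
	clk_disable_unprepare(clk);
}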

+ 0 - 2
drivers/media/platform/exynos4-is/fimc-is.h

@@ -73,7 +73,6 @@ enum {
 	ISS_CLK_LITE0,
 	ISS_CLK_LITE1,
 	ISS_CLK_MPLL,
-	ISS_CLK_SYSREG,
 	ISS_CLK_ISP,
 	ISS_CLK_DRC,
 	ISS_CLK_FD,
@@ -265,7 +264,6 @@ struct fimc_is {
 	spinlock_t			slock;
 
 	struct clk			*clocks[ISS_CLKS_MAX];
-	bool				clk_init;
 	void __iomem			*regs;
 	void __iomem			*pmu_regs;
 	int				irq;
