
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm tree changes from Dave Airlie:
 "This is the main drm pull request, I have some overlap with sound and
  arm-soc, the sound patch is acked and may conflict based on -next
  reports but should be a trivial fixup, which I'll leave to you!

  Highlights:

   - new drivers:

     MSM driver from Rob Clark

   - non-drm:

     switcheroo and hdmi audio driver support for secondary GPU
     poweroff, so drivers can use runtime PM to power off the GPUs.  This
     can save 5 or 6W on some optimus laptops.

   - drm core:

     combined GEM and TTM VMA manager
     per-filp mmap permission tracking
     initial render-node support (via a runtime enable for now, until the API is stable)
     remove old proc support
     lots of cleanups of legacy code
     hdmi vendor infoframes and 4k modes
     lots of gem/prime locking and race fixes
     async pageflip scaffolding
     drm bridge objects

   - i915:

     Haswell PC8+ support and eLLC support, HDMI 4K support, initial
     per-process VMA pieces, watermark reworks, conversion to generic hdmi
     infoframes, encoder reworking, fastboot support

   - radeon:

     CIK PM support, remove 3d blit code in favour of DMA engines,
     Berlin GPU support, HDMI audio fixes

   - nouveau:

     secondary GPU power down support for optimus laptops, lots of
     fixes, use MSI, VP3 engine support

   - exynos:

     runtime pm support for g2d, DT support, removal of non-DT support

   - tda998x i2c driver:

     lots of fixes for sync issues

   - gma500:

     lots of cleanups

   - rcar:

     add LVDS support, fbdev emulation

   - tegra:

     just minor fixes"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (684 commits)
  drm/exynos: Fix build error with exynos_drm_connector.c
  drm/exynos: Remove non-DT support in exynos_drm_fimd
  drm/exynos: Remove non-DT support in exynos_hdmi
  drm/exynos: Remove non-DT support in exynos_drm_g2d
  drm/exynos: Remove non-DT support in exynos_hdmiphy
  drm/exynos: Remove non-DT support in exynos_ddc
  drm/exynos: Make Exynos DRM drivers depend on OF
  drm/exynos: Consider fallback option to allocation fail
  drm/exynos: fimd: move platform data parsing to separate function
  drm/exynos: fimd: get signal polarities from device tree
  drm/exynos: fimd: replace struct fb_videomode with videomode
  drm/exynos: check a pixel format to a particular window layer
  drm/exynos: fix fimd pixel format setting
  drm/exynos: Add NULL pointer check
  drm/exynos: Remove redundant error messages
  drm/exynos: Add missing of.h header include
  drm/exynos: Remove redundant NULL check in exynos_drm_buf
  drm/exynos: add device tree support for rotator
  drm/exynos: Add missing includes
  drm/exynos: add runtime pm interfaces to g2d driver
  ...
Linus Torvalds, 11 years ago
commit a09e9a7a4b
100 changed files with 4467 additions and 4596 deletions
  1. Documentation/DocBook/drm.tmpl (+97 -41)
  2. Documentation/devicetree/bindings/gpu/samsung-rotator.txt (+27 -0)
  3. drivers/gpu/drm/Kconfig (+14 -1)
  4. drivers/gpu/drm/Makefile (+3 -2)
  5. drivers/gpu/drm/ast/ast_drv.c (+2 -3)
  6. drivers/gpu/drm/ast/ast_drv.h (+0 -3)
  7. drivers/gpu/drm/ast/ast_main.c (+1 -8)
  8. drivers/gpu/drm/ast/ast_ttm.c (+3 -2)
  9. drivers/gpu/drm/cirrus/cirrus_drv.c (+2 -3)
  10. drivers/gpu/drm/cirrus/cirrus_drv.h (+0 -3)
  11. drivers/gpu/drm/cirrus/cirrus_main.c (+1 -8)
  12. drivers/gpu/drm/cirrus/cirrus_ttm.c (+3 -2)
  13. drivers/gpu/drm/drm_agpsupport.c (+51 -0)
  14. drivers/gpu/drm/drm_bufs.c (+47 -189)
  15. drivers/gpu/drm/drm_context.c (+65 -16)
  16. drivers/gpu/drm/drm_crtc.c (+116 -57)
  17. drivers/gpu/drm/drm_crtc_helper.c (+70 -19)
  18. drivers/gpu/drm/drm_dma.c (+15 -2)
  19. drivers/gpu/drm/drm_drv.c (+42 -64)
  20. drivers/gpu/drm/drm_edid.c (+274 -32)
  21. drivers/gpu/drm/drm_fb_cma_helper.c (+2 -3)
  22. drivers/gpu/drm/drm_flip_work.c (+124 -0)
  23. drivers/gpu/drm/drm_fops.c (+15 -83)
  24. drivers/gpu/drm/drm_gem.c (+297 -143)
  25. drivers/gpu/drm/drm_gem_cma_helper.c (+5 -21)
  26. drivers/gpu/drm/drm_info.c (+5 -1)
  27. drivers/gpu/drm/drm_ioctl.c (+26 -36)
  28. drivers/gpu/drm/drm_memory.c (+0 -2)
  29. drivers/gpu/drm/drm_mm.c (+47 -182)
  30. drivers/gpu/drm/drm_modes.c (+0 -58)
  31. drivers/gpu/drm/drm_pci.c (+25 -10)
  32. drivers/gpu/drm/drm_platform.c (+12 -4)
  33. drivers/gpu/drm/drm_prime.c (+121 -69)
  34. drivers/gpu/drm/drm_proc.c (+0 -209)
  35. drivers/gpu/drm/drm_scatter.c (+18 -11)
  36. drivers/gpu/drm/drm_stub.c (+28 -45)
  37. drivers/gpu/drm/drm_usb.c (+9 -0)
  38. drivers/gpu/drm/drm_vm.c (+1 -2)
  39. drivers/gpu/drm/drm_vma_manager.c (+436 -0)
  40. drivers/gpu/drm/exynos/Kconfig (+3 -3)
  41. drivers/gpu/drm/exynos/exynos_ddc.c (+2 -11)
  42. drivers/gpu/drm/exynos/exynos_drm_buf.c (+1 -8)
  43. drivers/gpu/drm/exynos/exynos_drm_connector.c (+5 -33)
  44. drivers/gpu/drm/exynos/exynos_drm_crtc.c (+5 -5)
  45. drivers/gpu/drm/exynos/exynos_drm_dmabuf.c (+10 -27)
  46. drivers/gpu/drm/exynos/exynos_drm_drv.c (+4 -6)
  47. drivers/gpu/drm/exynos/exynos_drm_encoder.c (+1 -3)
  48. drivers/gpu/drm/exynos/exynos_drm_fb.c (+2 -6)
  49. drivers/gpu/drm/exynos/exynos_drm_fbdev.c (+15 -5)
  50. drivers/gpu/drm/exynos/exynos_drm_fimc.c (+3 -3)
  51. drivers/gpu/drm/exynos/exynos_drm_fimd.c (+120 -143)
  52. drivers/gpu/drm/exynos/exynos_drm_g2d.c (+33 -27)
  53. drivers/gpu/drm/exynos/exynos_drm_gem.c (+21 -32)
  54. drivers/gpu/drm/exynos/exynos_drm_gem.h (+0 -9)
  55. drivers/gpu/drm/exynos/exynos_drm_gsc.c (+2 -3)
  56. drivers/gpu/drm/exynos/exynos_drm_hdmi.c (+1 -3)
  57. drivers/gpu/drm/exynos/exynos_drm_iommu.c (+9 -0)
  58. drivers/gpu/drm/exynos/exynos_drm_ipp.c (+5 -17)
  59. drivers/gpu/drm/exynos/exynos_drm_plane.c (+2 -3)
  60. drivers/gpu/drm/exynos/exynos_drm_rotator.c (+83 -34)
  61. drivers/gpu/drm/exynos/exynos_drm_vidi.c (+1 -0)
  62. drivers/gpu/drm/exynos/exynos_hdmi.c (+17 -70)
  63. drivers/gpu/drm/exynos/exynos_hdmiphy.c (+2 -10)
  64. drivers/gpu/drm/exynos/exynos_mixer.c (+3 -6)
  65. drivers/gpu/drm/gma500/Makefile (+1 -0)
  66. drivers/gpu/drm/gma500/cdv_device.c (+3 -0)
  67. drivers/gpu/drm/gma500/cdv_device.h (+3 -9)
  68. drivers/gpu/drm/gma500/cdv_intel_crt.c (+27 -30)
  69. drivers/gpu/drm/gma500/cdv_intel_display.c (+57 -799)
  70. drivers/gpu/drm/gma500/cdv_intel_dp.c (+77 -77)
  71. drivers/gpu/drm/gma500/cdv_intel_hdmi.c (+40 -49)
  72. drivers/gpu/drm/gma500/cdv_intel_lvds.c (+33 -38)
  73. drivers/gpu/drm/gma500/framebuffer.c (+14 -17)
  74. drivers/gpu/drm/gma500/framebuffer.h (+1 -1)
  75. drivers/gpu/drm/gma500/gem.c (+10 -29)
  76. drivers/gpu/drm/gma500/gma_display.c (+776 -0)
  77. drivers/gpu/drm/gma500/gma_display.h (+103 -0)
  78. drivers/gpu/drm/gma500/gtt.c (+6 -32)
  79. drivers/gpu/drm/gma500/mdfld_dsi_output.c (+7 -8)
  80. drivers/gpu/drm/gma500/mdfld_dsi_output.h (+8 -8)
  81. drivers/gpu/drm/gma500/mdfld_intel_display.c (+23 -42)
  82. drivers/gpu/drm/gma500/oaktrail_crtc.c (+22 -41)
  83. drivers/gpu/drm/gma500/oaktrail_hdmi.c (+18 -25)
  84. drivers/gpu/drm/gma500/oaktrail_lvds.c (+22 -26)
  85. drivers/gpu/drm/gma500/psb_device.c (+2 -1)
  86. drivers/gpu/drm/gma500/psb_device.h (+6 -7)
  87. drivers/gpu/drm/gma500/psb_drv.c (+10 -11)
  88. drivers/gpu/drm/gma500/psb_drv.h (+5 -2)
  89. drivers/gpu/drm/gma500/psb_intel_display.c (+86 -824)
  90. drivers/gpu/drm/gma500/psb_intel_drv.h (+19 -25)
  91. drivers/gpu/drm/gma500/psb_intel_lvds.c (+32 -43)
  92. drivers/gpu/drm/gma500/psb_intel_sdvo.c (+25 -28)
  93. drivers/gpu/drm/i2c/tda998x_drv.c (+409 -76)
  94. drivers/gpu/drm/i810/i810_dma.c (+1 -2)
  95. drivers/gpu/drm/i810/i810_drv.c (+1 -2)
  96. drivers/gpu/drm/i810/i810_drv.h (+1 -1)
  97. drivers/gpu/drm/i915/Makefile (+2 -0)
  98. drivers/gpu/drm/i915/dvo_ch7xxx.c (+1 -1)
  99. drivers/gpu/drm/i915/i915_debugfs.c (+183 -479)
  100. drivers/gpu/drm/i915/i915_dma.c (+79 -62)

+ 97 - 41
Documentation/DocBook/drm.tmpl

@@ -155,13 +155,6 @@
               will become a fatal error.
             </para></listitem>
           </varlistentry>
-          <varlistentry>
-            <term>DRIVER_USE_MTRR</term>
-            <listitem><para>
-              Driver uses MTRR interface for mapping memory, the DRM core will
-              manage MTRR resources. Deprecated.
-            </para></listitem>
-          </varlistentry>
           <varlistentry>
             <term>DRIVER_PCI_DMA</term>
             <listitem><para>
@@ -194,28 +187,6 @@
               support shared IRQs (note that this is required of PCI  drivers).
             </para></listitem>
           </varlistentry>
-          <varlistentry>
-            <term>DRIVER_IRQ_VBL</term>
-            <listitem><para>Unused. Deprecated.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_DMA_QUEUE</term>
-            <listitem><para>
-              Should be set if the driver queues DMA requests and completes them
-              asynchronously.  Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_FB_DMA</term>
-            <listitem><para>
-              Driver supports DMA to/from the framebuffer, mapping of frambuffer
-              DMA buffers to userspace will be supported. Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_IRQ_VBL2</term>
-            <listitem><para>Unused. Deprecated.</para></listitem>
-          </varlistentry>
           <varlistentry>
             <term>DRIVER_GEM</term>
             <listitem><para>
@@ -234,6 +205,12 @@
               Driver implements DRM PRIME buffer sharing.
             </para></listitem>
           </varlistentry>
+          <varlistentry>
+            <term>DRIVER_RENDER</term>
+            <listitem><para>
+              Driver supports dedicated render nodes.
+            </para></listitem>
+          </varlistentry>
         </variablelist>
       </sect3>
       <sect3>
@@ -2212,6 +2189,18 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_rect.h
 !Edrivers/gpu/drm/drm_rect.c
     </sect2>
+    <sect2>
+      <title>Flip-work Helper Reference</title>
+!Pinclude/drm/drm_flip_work.h flip utils
+!Iinclude/drm/drm_flip_work.h
+!Edrivers/gpu/drm/drm_flip_work.c
+    </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -2422,18 +2411,18 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
       </abstract>
       <para>
         The <methodname>firstopen</methodname> method is called by the DRM core
-	when an application opens a device that has no other opened file handle.
-	Similarly the <methodname>lastclose</methodname> method is called when
-	the last application holding a file handle opened on the device closes
-	it. Both methods are mostly used for UMS (User Mode Setting) drivers to
-	acquire and release device resources which should be done in the
-	<methodname>load</methodname> and <methodname>unload</methodname>
-	methods for KMS drivers.
+	for legacy UMS (User Mode Setting) drivers only when an application
+	opens a device that has no other opened file handle. UMS drivers can
+	implement it to acquire device resources. KMS drivers can't use the
+	method and must acquire resources in the <methodname>load</methodname>
+	method instead.
       </para>
       <para>
-        Note that the <methodname>lastclose</methodname> method is also called
-	at module unload time or, for hot-pluggable devices, when the device is
-	unplugged. The <methodname>firstopen</methodname> and
+	Similarly the <methodname>lastclose</methodname> method is called when
+	the last application holding a file handle opened on the device closes
+	it, for both UMS and KMS drivers. Additionally, the method is also
+	called at module unload time or, for hot-pluggable devices, when the
+	device is unplugged. The <methodname>firstopen</methodname> and
 	<methodname>lastclose</methodname> calls can thus be unbalanced.
       </para>
       <para>
@@ -2462,7 +2451,12 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
       <para>
         The <methodname>lastclose</methodname> method should restore CRTC and
 	plane properties to default value, so that a subsequent open of the
-	device will not inherit state from the previous user.
+	device will not inherit state from the previous user. It can also be
+	used to execute delayed power switching state changes, e.g. in
+	conjunction with the vga-switcheroo infrastructure. Beyond that KMS
+	drivers should not do any further cleanup. Only legacy UMS drivers might
+	need to clean up device state so that the vga console or an independent
+	fbdev driver could take over.
       </para>
     </sect2>
     <sect2>
@@ -2498,7 +2492,6 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
 	<programlisting>
 	.poll = drm_poll,
 	.read = drm_read,
-	.fasync = drm_fasync,
 	.llseek = no_llseek,
 	</programlisting>
       </para>
@@ -2657,6 +2650,69 @@ int (*resume) (struct drm_device *);</synopsis>
       info, since man pages should cover the rest.
     </para>
 
+  <!-- External: render nodes -->
+
+    <sect1>
+      <title>Render nodes</title>
+      <para>
+        DRM core provides multiple character-devices for user-space to use.
+        Depending on which device is opened, user-space can perform a different
+        set of operations (mainly ioctls). The primary node is always created
+        and called <term>card&lt;num&gt;</term>. Additionally, a currently
+        unused control node, called <term>controlD&lt;num&gt;</term> is also
+        created. The primary node provides all legacy operations and
+        historically was the only interface used by userspace. With KMS, the
+        control node was introduced. However, the planned KMS control interface
+        has never been written and so the control node stays unused to date.
+      </para>
+      <para>
+        With the increased use of offscreen renderers and GPGPU applications,
+        clients no longer require running compositors or graphics servers to
+        make use of a GPU. But the DRM API required unprivileged clients to
+        authenticate to a DRM-Master prior to getting GPU access. To avoid this
+        step and to grant clients GPU access without authenticating, render
+        nodes were introduced. Render nodes solely serve render clients, that
+        is, no modesetting or privileged ioctls can be issued on render nodes.
+        Only non-global rendering commands are allowed. If a driver supports
+        render nodes, it must advertise it via the <term>DRIVER_RENDER</term>
+        DRM driver capability. If not supported, the primary node must be used
+        for render clients together with the legacy drmAuth authentication
+        procedure.
+      </para>
+      <para>
+        If a driver advertises render node support, DRM core will create a
+        separate render node called <term>renderD&lt;num&gt;</term>. There will
+        be one render node per device. No ioctls except  PRIME-related ioctls
+        will be allowed on this node. Especially <term>GEM_OPEN</term> will be
+        explicitly prohibited. Render nodes are designed to avoid the
+        buffer-leaks, which occur if clients guess the flink names or mmap
+        offsets on the legacy interface. Additionally to this basic interface,
+        drivers must mark their driver-dependent render-only ioctls as
+        <term>DRM_RENDER_ALLOW</term> so render clients can use them. Driver
+        authors must be careful not to allow any privileged ioctls on render
+        nodes.
+      </para>
+      <para>
+        With render nodes, user-space can now control access to the render node
+        via basic file-system access-modes. A running graphics server which
+        authenticates clients on the privileged primary/legacy node is no longer
+        required. Instead, a client can open the render node and is immediately
+        granted GPU access. Communication between clients (or servers) is done
+        via PRIME. FLINK from render node to legacy node is not supported. New
+        clients must not use the insecure FLINK interface.
+      </para>
+      <para>
+        Besides dropping all modeset/global ioctls, render nodes also drop the
+        DRM-Master concept. There is no reason to associate render clients with
+        a DRM-Master as they are independent of any graphics server. Besides,
+        they must work without any running master, anyway.
+        Drivers must be able to run without a master object if they support
+        render nodes. If, on the other hand, a driver requires shared state
+        between clients which is visible to user-space and accessible beyond
+        open-file boundaries, they cannot support render nodes.
+      </para>
+    </sect1>
+
   <!-- External: vblank handling -->
 
     <sect1>
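
The render-node documentation added above translates into very little
user-space code. A minimal sketch, not part of this patch: a render client
opens the render node directly and gets GPU access with no DRM-Master
authentication. The /dev/dri/renderD128 path and the prime_fd variable are
illustrative assumptions, and libdrm's xf86drm.h is assumed to be available.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <xf86drm.h>

	int main(void)
	{
		/* Plain file-system permissions are the only access control;
		 * no drmAuth()/DRM-Master handshake is required here. */
		int fd = open("/dev/dri/renderD128", O_RDWR);
		if (fd < 0) {
			perror("open render node");
			return 1;
		}

		drmVersionPtr ver = drmGetVersion(fd);
		if (ver) {
			printf("render node driven by %s\n", ver->name);
			drmFreeVersion(ver);
		}

		/* Buffer sharing goes through PRIME only; flink/GEM_OPEN are
		 * rejected on render nodes. Given a dma-buf fd (hypothetical
		 * prime_fd), it would be imported with:
		 *
		 *	uint32_t handle;
		 *	drmPrimeFDToHandle(fd, prime_fd, &handle);
		 */
		return 0;
	}

On the kernel side, only ioctls flagged DRM_RENDER_ALLOW in a driver's ioctl
table are reachable through this node, as the drm_drv.c hunk further down
starts doing for DRM_IOCTL_GET_CAP.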

+ 27 - 0
Documentation/devicetree/bindings/gpu/samsung-rotator.txt

@@ -0,0 +1,27 @@
+* Samsung Image Rotator
+
+Required properties:
+  - compatible : value should be one of the following:
+	(a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
+	(b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
+	(c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
+
+  - reg : Physical base address of the IP registers and length of memory
+	  mapped region.
+
+  - interrupts : Interrupt specifier for rotator interrupt, according to format
+		 specific to interrupt parent.
+
+  - clocks : Clock specifier for rotator clock, according to generic clock
+	     bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
+
+  - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
+
+Example:
+	rotator@12810000 {
+		compatible = "samsung,exynos4210-rotator";
+		reg = <0x12810000 0x1000>;
+		interrupts = <0 83 0>;
+		clocks = <&clock 278>;
+		clock-names = "rotator";
+	};

+ 14 - 1
drivers/gpu/drm/Kconfig

@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
 	select HDMI
 	select I2C
 	select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
 	  the driver to bind to PCI devices, which precludes loading things
 	  like intelfb.
 
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
+
 config DRM_MGA
 	tristate "Matrox g200/g400"
 	depends on DRM && PCI
@@ -223,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
 source "drivers/gpu/drm/tilcdc/Kconfig"
 
 source "drivers/gpu/drm/qxl/Kconfig"
+
+source "drivers/gpu/drm/msm/Kconfig"

+ 3 - 2
drivers/gpu/drm/Makefile

@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
 drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_context.o drm_dma.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
-		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+		drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o drm_pci.o \
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
-		drm_rect.o
+		drm_rect.o drm_vma_manager.o drm_flip_work.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_MSM) += msm/
 obj-y			+= i2c/

+ 2 - 3
drivers/gpu/drm/ast/ast_drv.c

@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = ast_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
 };
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
 	.dev_priv_size = 0,
 
 	.load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
-	.dumb_destroy = ast_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 
 };
 

+ 0 - 3
drivers/gpu/drm/ast/ast_drv.h

@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
 extern int ast_dumb_create(struct drm_file *file,
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
-			    struct drm_device *dev,
-			    uint32_t handle);
 
 extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);

+ 1 - 8
drivers/gpu/drm/ast/ast_main.c

@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int ast_dumb_destroy(struct drm_file *file,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return drm_gem_handle_delete(file, handle);
-}
-
 int ast_gem_init_object(struct drm_gem_object *obj)
 {
 	BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
 {
-	return bo->bo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 int
 ast_dumb_mmap_offset(struct drm_file *file,
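
For context, the offset that ast_dumb_mmap_offset() now pulls from the new
VMA offset manager is exactly what user space feeds to mmap(). A minimal
user-space sketch, not from this patch; fd, handle and size are assumed to
come from an earlier DRM_IOCTL_MODE_CREATE_DUMB call, and error handling is
trimmed:

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	static void *map_dumb_buffer(int fd, uint32_t handle, size_t size)
	{
		struct drm_mode_map_dumb map = { .handle = handle };

		/* Ask the driver (ast_dumb_mmap_offset() above) for the fake
		 * mmap offset of this buffer object. */
		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return MAP_FAILED;

		/* mmap() on the DRM fd at that offset maps the BO itself. */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, map.offset);
	}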

+ 3 - 2
drivers/gpu/drm/ast/ast_ttm.c

@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct ast_bo *astbo = ast_bo(bo);
+
+	return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
 }
 
 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,7 +323,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 		return ret;
 	}
 
-	astbo->gem.driver_private = NULL;
 	astbo->bo.bdev = &ast->ttm.bdev;
 	astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 

+ 2 - 3
drivers/gpu/drm/cirrus/cirrus_drv.c

@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
-	.fasync = drm_fasync,
 };
 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
 	.load = cirrus_driver_load,
 	.unload = cirrus_driver_unload,
 	.fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
-	.dumb_destroy = cirrus_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 };
 
 static struct pci_driver cirrus_pci_driver = {

+ 0 - 3
drivers/gpu/drm/cirrus/cirrus_drv.h

@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
 int cirrus_dumb_create(struct drm_file *file,
 		    struct drm_device *dev,
 		       struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
-		     struct drm_device *dev,
-			uint32_t handle);
 
 int cirrus_framebuffer_init(struct drm_device *dev,
 			   struct cirrus_framebuffer *gfb,

+ 1 - 8
drivers/gpu/drm/cirrus/cirrus_main.c

@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int cirrus_dumb_destroy(struct drm_file *file,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return drm_gem_handle_delete(file, handle);
-}
-
 int cirrus_gem_init_object(struct drm_gem_object *obj)
 {
 	BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
 {
-	return bo->bo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int

+ 3 - 2
drivers/gpu/drm/cirrus/cirrus_ttm.c

@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+	return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
 }
 
 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,7 +328,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 		return ret;
 	}
 
-	cirrusbo->gem.driver_private = NULL;
 	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
 	cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 

+ 51 - 0
drivers/gpu/drm/drm_agpsupport.c

@@ -423,6 +423,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 	return head;
 }
 
+/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them. But keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible of cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+	struct drm_agp_mem *entry, *tempe;
+
+	if (!drm_core_has_AGP(dev) || !dev->agp)
+		return;
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+		if (entry->bound)
+			drm_unbind_agp(entry->memory);
+		drm_free_agp(entry->memory, entry->pages);
+		kfree(entry);
+	}
+	INIT_LIST_HEAD(&dev->agp->memory);
+
+	if (dev->agp->acquired)
+		drm_agp_release(dev);
+
+	dev->agp->acquired = 0;
+	dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @dev: DRM device
+ *
+ * Destroy resources that were previously allocated via drm_agp_initp. Caller
+ * must ensure to clean up all AGP resources before calling this. See
+ * drm_agp_clear().
+ *
+ * Call this to destroy AGP heads allocated via drm_agp_init().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+	kfree(agp);
+}
+
 /**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.

+ 47 - 189
drivers/gpu/drm/drm_bufs.c

@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 			return 0;
 		}
 
-		if (drm_core_has_MTRR(dev)) {
-			if (map->type == _DRM_FRAME_BUFFER ||
-			    (map->flags & _DRM_WRITE_COMBINING)) {
-				map->mtrr =
-					arch_phys_wc_add(map->offset, map->size);
-			}
+		if (map->type == _DRM_FRAME_BUFFER ||
+		    (map->flags & _DRM_WRITE_COMBINING)) {
+			map->mtrr =
+				arch_phys_wc_add(map->offset, map->size);
 		}
 		if (map->type == _DRM_REGISTERS) {
 			if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		}
 		map->handle = vmalloc_user(map->size);
 		DRM_DEBUG("%lu %d %p\n",
-			  map->size, drm_order(map->size), map->handle);
+			  map->size, order_base_2(map->size), map->handle);
 		if (!map->handle) {
 			kfree(map);
 			return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 		iounmap(map->handle);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (drm_core_has_MTRR(dev))
-			arch_phys_wc_del(map->mtrr);
+		arch_phys_wc_del(map->mtrr);
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		return -EINVAL;
 
 	count = request->count;
-	order = drm_order(request->size);
+	order = order_base_2(request->size);
 	size = 1 << order;
 
 	alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 		return -EPERM;
 
 	count = request->count;
-	order = drm_order(request->size);
+	order = order_base_2(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 		return -EPERM;
 
 	count = request->count;
-	order = drm_order(request->size);
+	order = order_base_2(request->size);
 	size = 1 << order;
 
 	alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 	return 0;
 }
 
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
-	struct drm_device_dma *dma = dev->dma;
-	struct drm_buf_entry *entry;
-	struct drm_buf *buf;
-	unsigned long offset;
-	unsigned long agp_offset;
-	int count;
-	int order;
-	int size;
-	int alignment;
-	int page_order;
-	int total;
-	int byte_count;
-	int i;
-	struct drm_buf **temp_buflist;
-
-	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
-		return -EINVAL;
-
-	if (!dma)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	count = request->count;
-	order = drm_order(request->size);
-	size = 1 << order;
-
-	alignment = (request->flags & _DRM_PAGE_ALIGN)
-	    ? PAGE_ALIGN(size) : size;
-	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
-	total = PAGE_SIZE << page_order;
-
-	byte_count = 0;
-	agp_offset = request->agp_start;
-
-	DRM_DEBUG("count:      %d\n", count);
-	DRM_DEBUG("order:      %d\n", order);
-	DRM_DEBUG("size:       %d\n", size);
-	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
-	DRM_DEBUG("alignment:  %d\n", alignment);
-	DRM_DEBUG("page_order: %d\n", page_order);
-	DRM_DEBUG("total:      %d\n", total);
-
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return -EINVAL;
-
-	spin_lock(&dev->count_lock);
-	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
-		return -EBUSY;
-	}
-	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
-
-	mutex_lock(&dev->struct_mutex);
-	entry = &dma->bufs[order];
-	if (entry->buf_count) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;	/* May only call once for each order */
-	}
-
-	if (count < 0 || count > 4096) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -EINVAL;
-	}
-
-	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
-				GFP_KERNEL);
-	if (!entry->buflist) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-
-	entry->buf_size = size;
-	entry->page_order = page_order;
-
-	offset = 0;
-
-	while (entry->buf_count < count) {
-		buf = &entry->buflist[entry->buf_count];
-		buf->idx = dma->buf_count + entry->buf_count;
-		buf->total = alignment;
-		buf->order = order;
-		buf->used = 0;
-
-		buf->offset = (dma->byte_count + offset);
-		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset);
-		buf->next = NULL;
-		buf->waiting = 0;
-		buf->pending = 0;
-		buf->file_priv = NULL;
-
-		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
-		if (!buf->dev_private) {
-			/* Set count correctly so we free the proper amount. */
-			entry->buf_count = count;
-			drm_cleanup_buf_error(dev, entry);
-			mutex_unlock(&dev->struct_mutex);
-			atomic_dec(&dev->buf_alloc);
-			return -ENOMEM;
-		}
-
-		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
-		offset += alignment;
-		entry->buf_count++;
-		byte_count += PAGE_SIZE << page_order;
-	}
-
-	DRM_DEBUG("byte_count: %d\n", byte_count);
-
-	temp_buflist = krealloc(dma->buflist,
-				(dma->buf_count + entry->buf_count) *
-				sizeof(*dma->buflist), GFP_KERNEL);
-	if (!temp_buflist) {
-		/* Free the entry because it isn't valid */
-		drm_cleanup_buf_error(dev, entry);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-	dma->buflist = temp_buflist;
-
-	for (i = 0; i < entry->buf_count; i++) {
-		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
-	}
-
-	dma->buf_count += entry->buf_count;
-	dma->seg_count += entry->seg_count;
-	dma->page_count += byte_count >> PAGE_SHIFT;
-	dma->byte_count += byte_count;
-
-	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
-	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	request->count = entry->buf_count;
-	request->size = size;
-
-	dma->flags = _DRM_DMA_USE_FB;
-
-	atomic_dec(&dev->buf_alloc);
-	return 0;
-}
-
-
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
 	struct drm_buf_desc *request = data;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
 	if (request->flags & _DRM_SG_BUFFER)
 		ret = drm_addbufs_sg(dev, request);
 	else if (request->flags & _DRM_FB_BUFFER)
-		ret = drm_addbufs_fb(dev, request);
+		ret = -EINVAL;
 	else
 		ret = drm_addbufs_pci(dev, request);
 
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
 	int i;
 	int count;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
 	int order;
 	struct drm_buf_entry *entry;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
 
 	DRM_DEBUG("%d, %d, %d\n",
 		  request->size, request->low_mark, request->high_mark);
-	order = drm_order(request->size);
+	order = order_base_2(request->size);
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 	entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
 	int idx;
 	struct drm_buf *buf;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	struct drm_buf_map *request = data;
 	int i;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	if (request->count >= dma->buf_count) {
 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
 		    || (drm_core_check_feature(dev, DRIVER_SG)
-			&& (dma->flags & _DRM_DMA_USE_SG))
-		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
-			&& (dma->flags & _DRM_DMA_USE_FB))) {
+			&& (dma->flags & _DRM_DMA_USE_SG))) {
 			struct drm_local_map *map = dev->agp_buffer_map;
 			unsigned long token = dev->agp_buffer_token;
 
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	return retcode;
 }
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+int drm_dma_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
 {
-	int order;
-	unsigned long tmp;
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
 
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+	if (dev->driver->dma_ioctl)
+		return dev->driver->dma_ioctl(dev, data, file_priv);
+	else
+		return -EINVAL;
+}
 
-	if (size & (size - 1))
-		++order;
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
 
-	return order;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && entry->map->type == _DRM_SHM &&
+		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+			return entry->map;
+		}
+	}
+	return NULL;
 }
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
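
The open-coded drm_order() loop removed above is replaced throughout this
series by order_base_2() from <linux/log2.h>, which computes the same value:
the smallest order such that (1 << order) >= size. A tiny illustrative
sketch, not from the patch (the helper name is made up):

	#include <linux/log2.h>

	static int buf_size_to_order(unsigned long size)
	{
		/* e.g. 4096 -> 12, 5000 -> 13, 1 -> 0 */
		return order_base_2(size);
	}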

+ 65 - 16
drivers/gpu/drm/drm_context.c

@@ -42,10 +42,6 @@
 
 #include <drm/drmP.h>
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
 /**
  * Free a handle from the context bitmap.
  *
@@ -56,13 +52,48 @@
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
 }
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+				  struct drm_file *file_priv)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+}
+
 /**
  * Context bitmap allocation.
  *
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	idr_init(&dev->ctx_idr);
-	return 0;
 }
 
 /**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
 				       struct drm_file *file_priv, int new)
 {
 	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
-	dev->last_switch = jiffies;
 
 	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
 		DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
 	   when the kernel holds the lock, release
 	   that lock here. */
 	clear_bit(0, &dev->context_flag);
-	wake_up(&dev->context_wait);
 
 	return 0;
 }
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
 	struct drm_ctx ctx;
 	int i;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	ctx->handle = drm_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	/* This does nothing */
-	return 0;
-}
-
 /**
  * Get context.
  *
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)

+ 116 - 57
drivers/gpu/drm/drm_crtc.c

@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
-static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
-	{ DRM_MODE_DITHERING_OFF, "Off" },
-	{ DRM_MODE_DITHERING_ON, "On" },
-	{ DRM_MODE_DITHERING_AUTO, "Automatic" },
-};
-
 /*
  * Non-global properties, but "required" for certain connectors.
  */
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 struct drm_conn_prop_enum_list {
 	int type;
 	const char *name;
-	int count;
+	struct ida ida;
 };
 
 /*
  * Connector and encoder types.
  */
 static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
-	{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
-	{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
-	{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
-	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
-	{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
-	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
-	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
-	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
-	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
-	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
-	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
-	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
-	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
-	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
+	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
+	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+	{ DRM_MODE_CONNECTOR_Component, "Component" },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+	{ DRM_MODE_CONNECTOR_TV, "TV" },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
+void drm_connector_ida_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+		ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
 const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
 	static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_mode_probed_add);
 
-/**
+/*
  * drm_mode_remove - remove and free a mode
  * @connector: connector list to modify
  * @mode: mode to remove
  *
  * Remove @mode from @connector's mode list, then free it.
  */
-void drm_mode_remove(struct drm_connector *connector,
-		     struct drm_display_mode *mode)
+static void drm_mode_remove(struct drm_connector *connector,
+			    struct drm_display_mode *mode)
 {
 	list_del(&mode->head);
 	drm_mode_destroy(connector->dev, mode);
 }
-EXPORT_SYMBOL(drm_mode_remove);
 
 /**
  * drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
 		       int connector_type)
 {
 	int ret;
+	struct ida *connector_ida =
+		&drm_connector_enum_list[connector_type].ida;
 
 	drm_modeset_lock_all(dev);
 
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
 	connector->funcs = funcs;
 	connector->connector_type = connector_type;
 	connector->connector_type_id =
-		++drm_connector_enum_list[connector_type].count; /* TODO */
+		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+	if (connector->connector_type_id < 0) {
+		ret = connector->connector_type_id;
+		drm_mode_object_put(dev, &connector->base);
+		goto out;
+	}
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
 	connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	list_for_each_entry_safe(mode, t, &connector->modes, head)
 		drm_mode_remove(connector, mode);
 
+	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+		   connector->connector_type_id);
+
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
 	dev->mode_config.num_connector--;
@@ -781,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_connector_unplug_all);
 
+int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+		const struct drm_bridge_funcs *funcs)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
+	if (ret)
+		goto out;
+
+	bridge->dev = dev;
+	bridge->funcs = funcs;
+
+	list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
+	dev->mode_config.num_bridge++;
+
+ out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_bridge_init);
+
+void drm_bridge_cleanup(struct drm_bridge *bridge)
+{
+	struct drm_device *dev = bridge->dev;
+
+	drm_modeset_lock_all(dev);
+	drm_mode_object_put(dev, &bridge->base);
+	list_del(&bridge->head);
+	dev->mode_config.num_bridge--;
+	drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_bridge_cleanup);
+
 int drm_encoder_init(struct drm_device *dev,
 		      struct drm_encoder *encoder,
 		      const struct drm_encoder_funcs *funcs,
@@ -1134,30 +1187,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
-/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
-	struct drm_property *dithering_mode;
-
-	if (dev->mode_config.dithering_mode_property)
-		return 0;
-
-	dithering_mode =
-		drm_property_create_enum(dev, 0, "dithering",
-				drm_dithering_mode_enum_list,
-				    ARRAY_SIZE(drm_dithering_mode_enum_list));
-	dev->mode_config.dithering_mode_property = dithering_mode;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
 /**
  * drm_mode_create_dirty_property - create dirty property
  * @dev: DRM device
@@ -1190,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
 	total_objects += dev->mode_config.num_crtc;
 	total_objects += dev->mode_config.num_connector;
 	total_objects += dev->mode_config.num_encoder;
+	total_objects += dev->mode_config.num_bridge;
 
 	group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
 	if (!group->id_list)
@@ -1198,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
 	group->num_crtcs = 0;
 	group->num_connectors = 0;
 	group->num_encoders = 0;
+	group->num_bridges = 0;
 	return 0;
 }
 
@@ -1207,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
+	struct drm_bridge *bridge;
 	int ret;
 
 	if ((ret = drm_mode_group_init(dev, group)))
@@ -1223,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
 		group->id_list[group->num_crtcs + group->num_encoders +
 			       group->num_connectors++] = connector->base.id;
 
+	list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
+		group->id_list[group->num_crtcs + group->num_encoders +
+			       group->num_connectors + group->num_bridges++] =
+					bridge->base.id;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
 	r->depth = fb->depth;
 	r->bpp = fb->bits_per_pixel;
 	r->pitch = fb->pitches[0];
-	if (fb->funcs->create_handle)
-		ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
-	else
+	if (fb->funcs->create_handle) {
+		if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handle);
+		} else {
+			/* GET_FB() is an unprivileged ioctl so we must not
+			 * return a buffer-handle to non-master processes! For
+			 * backwards-compatibility reasons, we cannot make
+			 * GET_FB() privileged, so just return an invalid handle
+			 * for non-masters. */
+			r->handle = 0;
+			ret = 0;
+		}
+	} else {
 		ret = -ENODEV;
+	}
 
 	drm_framebuffer_unreference(fb);
 
@@ -3514,6 +3563,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	    page_flip->reserved != 0)
 		return -EINVAL;
 
+	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+		return -EINVAL;
+
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj)
 		return -EINVAL;
@@ -3587,7 +3639,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	}
 
 	old_fb = crtc->fb;
-	ret = crtc->funcs->page_flip(crtc, fb, e);
+	ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
 	if (ret) {
 		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 			spin_lock_irqsave(&dev->event_lock, flags);
@@ -3905,6 +3957,7 @@ void drm_mode_config_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev->mode_config.fb_list);
 	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
 	INIT_LIST_HEAD(&dev->mode_config.connector_list);
+	INIT_LIST_HEAD(&dev->mode_config.bridge_list);
 	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
 	INIT_LIST_HEAD(&dev->mode_config.property_list);
 	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -3941,6 +3994,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	struct drm_connector *connector, *ot;
 	struct drm_crtc *crtc, *ct;
 	struct drm_encoder *encoder, *enct;
+	struct drm_bridge *bridge, *brt;
 	struct drm_framebuffer *fb, *fbt;
 	struct drm_property *property, *pt;
 	struct drm_property_blob *blob, *bt;
@@ -3951,6 +4005,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		encoder->funcs->destroy(encoder);
 	}
 
+	list_for_each_entry_safe(bridge, brt,
+				 &dev->mode_config.bridge_list, head) {
+		bridge->funcs->destroy(bridge);
+	}
+
 	list_for_each_entry_safe(connector, ot,
 				 &dev->mode_config.connector_list, head) {
 		connector->funcs->destroy(connector);
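
The drm_bridge object added here is driven purely through drm_bridge_funcs
callbacks, which the drm_crtc_helper.c hunk below calls around the encoder's
own hooks. A skeleton of a driver-side bridge, sketched for illustration only
(the my_bridge_* names are hypothetical; hardware programming, allocation and
error handling are trimmed):

	static void my_bridge_pre_enable(struct drm_bridge *bridge)
	{
		/* power rails, PLLs: runs before the encoder's commit */
	}

	static void my_bridge_enable(struct drm_bridge *bridge)
	{
		/* start driving the link: runs after the encoder's commit */
	}

	static void my_bridge_disable(struct drm_bridge *bridge)
	{
		/* stop the link: runs before the encoder is disabled */
	}

	static void my_bridge_post_disable(struct drm_bridge *bridge)
	{
		/* power down: runs after the encoder is disabled */
	}

	static void my_bridge_destroy(struct drm_bridge *bridge)
	{
		drm_bridge_cleanup(bridge);
	}

	static const struct drm_bridge_funcs my_bridge_funcs = {
		.pre_enable   = my_bridge_pre_enable,
		.enable       = my_bridge_enable,
		.disable      = my_bridge_disable,
		.post_disable = my_bridge_post_disable,
		.destroy      = my_bridge_destroy,
	};

	/* Register the bridge with the core and hang it off the encoder so
	 * the helpers start calling into it. */
	static int my_bridge_attach(struct drm_device *dev,
				    struct drm_encoder *encoder,
				    struct drm_bridge *bridge)
	{
		int ret = drm_bridge_init(dev, bridge, &my_bridge_funcs);
		if (ret)
			return ret;

		encoder->bridge = bridge;
		return 0;
	}

The mode_fixup and mode_set callbacks are optional; the helper only calls
them when provided, whereas the four enable/disable hooks and destroy are
invoked unconditionally, so a bridge driver must supply them.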

+ 70 - 19
drivers/gpu/drm/drm_crtc_helper.c

@@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
+	if (encoder->bridge)
+		encoder->bridge->funcs->disable(encoder->bridge);
+
 	if (encoder_funcs->disable)
 		(*encoder_funcs->disable)(encoder);
 	else
 		(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+
+	if (encoder->bridge)
+		encoder->bridge->funcs->post_disable(encoder->bridge);
 }
 
 /**
@@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 
 		if (encoder->crtc != crtc)
 			continue;
+
+		if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+			ret = encoder->bridge->funcs->mode_fixup(
+					encoder->bridge, mode, adjusted_mode);
+			if (!ret) {
+				DRM_DEBUG_KMS("Bridge fixup failed\n");
+				goto done;
+			}
+		}
+
 		encoder_funcs = encoder->helper_private;
 		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
 						      adjusted_mode))) {
@@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 
 		if (encoder->crtc != crtc)
 			continue;
+
+		if (encoder->bridge)
+			encoder->bridge->funcs->disable(encoder->bridge);
+
 		encoder_funcs = encoder->helper_private;
 		/* Disable the encoders as the first thing we do. */
 		encoder_funcs->prepare(encoder);
+
+		if (encoder->bridge)
+			encoder->bridge->funcs->post_disable(encoder->bridge);
 	}
 
 	drm_crtc_prepare_encoders(dev);
@@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			mode->base.id, mode->name);
 		encoder_funcs = encoder->helper_private;
 		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+
+		if (encoder->bridge && encoder->bridge->funcs->mode_set)
+			encoder->bridge->funcs->mode_set(encoder->bridge, mode,
+					adjusted_mode);
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 
+		if (encoder->bridge)
+			encoder->bridge->funcs->pre_enable(encoder->bridge);
+
 		encoder_funcs = encoder->helper_private;
 		encoder_funcs->commit(encoder);
 
+		if (encoder->bridge)
+			encoder->bridge->funcs->enable(encoder->bridge);
 	}
 
 	/* Store real post-adjustment hardware mode. */
@@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 	return dpms;
 }
 
+/* Helper which handles bridge ordering around encoder dpms */
+static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_bridge *bridge = encoder->bridge;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+
+	if (bridge) {
+		if (mode == DRM_MODE_DPMS_ON)
+			bridge->funcs->pre_enable(bridge);
+		else
+			bridge->funcs->disable(bridge);
+	}
+
+	encoder_funcs = encoder->helper_private;
+	if (encoder_funcs->dpms)
+		encoder_funcs->dpms(encoder, mode);
+
+	if (bridge) {
+		if (mode == DRM_MODE_DPMS_ON)
+			bridge->funcs->enable(bridge);
+		else
+			bridge->funcs->post_disable(bridge);
+	}
+}
+
 static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 {
 	int dpms = DRM_MODE_DPMS_OFF;
@@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_encoder *encoder = connector->encoder;
 	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
-	int old_dpms;
+	int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
 	if (mode == connector->dpms)
 		return;
@@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 	old_dpms = connector->dpms;
 	connector->dpms = mode;
 
+	if (encoder)
+		encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
+
 	/* from off to on, do crtc then encoder */
 	if (mode < old_dpms) {
 		if (crtc) {
@@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 				(*crtc_funcs->dpms) (crtc,
 						     drm_helper_choose_crtc_dpms(crtc));
 		}
-		if (encoder) {
-			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-			if (encoder_funcs->dpms)
-				(*encoder_funcs->dpms) (encoder,
-							drm_helper_choose_encoder_dpms(encoder));
-		}
+		if (encoder)
+			drm_helper_encoder_dpms(encoder, encoder_dpms);
 	}
 
 	/* from on to off, do encoder then crtc */
 	if (mode > old_dpms) {
-		if (encoder) {
-			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-			if (encoder_funcs->dpms)
-				(*encoder_funcs->dpms) (encoder,
-							drm_helper_choose_encoder_dpms(encoder));
-		}
+		if (encoder)
+			drm_helper_encoder_dpms(encoder, encoder_dpms);
 		if (crtc) {
 			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 			if (crtc_funcs->dpms)
@@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 	struct drm_encoder *encoder;
-	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_crtc_helper_funcs *crtc_funcs;
-	int ret;
+	int ret, encoder_dpms;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 
@@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 				if(encoder->crtc != crtc)
 					continue;
 
-				encoder_funcs = encoder->helper_private;
-				if (encoder_funcs->dpms)
-					(*encoder_funcs->dpms) (encoder,
-								drm_helper_choose_encoder_dpms(encoder));
+				encoder_dpms = drm_helper_choose_encoder_dpms(
+							encoder);
+
+				drm_helper_encoder_dpms(encoder, encoder_dpms);
 			}
 
 			crtc_funcs = crtc->helper_private;
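
A minimal sketch, not part of this diff, of the driver side of the new drm_bridge hooks used above: disable runs before the encoder's prepare() and post_disable after it, while pre_enable runs before commit() and enable after it. The my_bridge_* names are hypothetical, and only the four power-sequencing callbacks that the helper invokes unconditionally are shown; a real bridge would also provide mode_fixup/mode_set/destroy as needed.

#include <drm/drm_crtc.h>

/* Called before the encoder helper's commit(). */
static void my_bridge_pre_enable(struct drm_bridge *bridge)
{
	/* e.g. power up the external encoder chip */
}

/* Called after the encoder helper's commit(). */
static void my_bridge_enable(struct drm_bridge *bridge)
{
	/* e.g. start driving the link */
}

/* Called before the encoder helper's prepare()/dpms-off. */
static void my_bridge_disable(struct drm_bridge *bridge)
{
	/* e.g. stop the link while the encoder is still active */
}

/* Called after the encoder has been disabled. */
static void my_bridge_post_disable(struct drm_bridge *bridge)
{
	/* e.g. cut power to the external encoder chip */
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.pre_enable	= my_bridge_pre_enable,
	.enable		= my_bridge_enable,
	.disable	= my_bridge_disable,
	.post_disable	= my_bridge_post_disable,
};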

+ 15 - 2
drivers/gpu/drm/drm_dma.c

@@ -44,10 +44,18 @@
  *
  * Allocate and initialize a drm_device_dma structure.
  */
-int drm_dma_setup(struct drm_device *dev)
+int drm_legacy_dma_setup(struct drm_device *dev)
 {
 	int i;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		return 0;
+	}
+
+	dev->buf_use = 0;
+	atomic_set(&dev->buf_alloc, 0);
+
 	dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
 	if (!dev->dma)
 		return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
  * Free all pages associated with DMA buffers, the buffers and pages lists, and
  * finally the drm_device::dma structure itself.
  */
-void drm_dma_takedown(struct drm_device *dev)
+void drm_legacy_dma_takedown(struct drm_device *dev)
 {
 	struct drm_device_dma *dma = dev->dma;
 	int i, j;
 
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		return;
+	}
+
 	if (!dma)
 		return;
 

+ 42 - 64
drivers/gpu/drm/drm_drv.c

@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
-	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
-	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 #endif
 
-	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -171,6 +170,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
+/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.

+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	int i;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
 /**
  * Take down the DRM device.
  *
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Clear AGP information */
-	if (drm_core_has_AGP(dev) && dev->agp &&
-			!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct drm_agp_mem *entry, *tempe;
-
-		/* Remove AGP resources, but leave dev->agp
-		   intact until drv_cleanup is called. */
-		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-			if (entry->bound)
-				drm_unbind_agp(entry->memory);
-			drm_free_agp(entry->memory, entry->pages);
-			kfree(entry);
-		}
-		INIT_LIST_HEAD(&dev->agp->memory);
+	drm_agp_clear(dev);
 
-		if (dev->agp->acquired)
-			drm_agp_release(dev);
-
-		dev->agp->acquired = 0;
-		dev->agp->enabled = 0;
-	}
-	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
-		drm_sg_cleanup(dev->sg);
-		dev->sg = NULL;
-	}
+	drm_legacy_sg_cleanup(dev);
 
 	/* Clear vma list (only built for debugging) */
 	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
 		kfree(vma);
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_dma_takedown(dev);
+	drm_legacy_dma_takedown(dev);
 
 	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
+	drm_legacy_dev_reinit(dev);
+
 	DRM_DEBUG("lastclose completed\n");
 	return 0;
 }
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
 	int ret = -ENOMEM;
 
 	drm_global_init();
+	drm_connector_ida_init();
 	idr_init(&drm_minors_idr);
 
 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
 		goto err_p2;
 	}
 
-	drm_proc_root = proc_mkdir("dri", NULL);
-	if (!drm_proc_root) {
-		DRM_ERROR("Cannot create /proc/dri\n");
-		ret = -1;
-		goto err_p3;
-	}
-
 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
 	if (!drm_debugfs_root) {
 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
 
 static void __exit drm_core_exit(void)
 {
-	remove_proc_entry("dri", NULL);
 	debugfs_remove(drm_debugfs_root);
 	drm_sysfs_destroy();
 
 	unregister_chrdev(DRM_MAJOR, "drm");
 
+	drm_connector_ida_destroy();
 	idr_destroy(&drm_minors_idr);
 }
 
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
 
 	/* Do not trust userspace, use our own definition */
 	func = ioctl->func;
-	/* is there a local override? */
-	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
-		func = dev->driver->dma_ioctl;
 
 	if (!func) {
 		DRM_DEBUG("no function\n");
 		retcode = -EINVAL;
 	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
-		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+		   ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
 		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
-		   (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+		   (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
+		   (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
 		retcode = -EACCES;
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
 		DRM_DEBUG("ret = %d\n", retcode);
 	return retcode;
 }
-
 EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
-	struct drm_map_list *entry;
-
-	list_for_each_entry(entry, &dev->maplist, head) {
-		if (entry->map && entry->map->type == _DRM_SHM &&
-		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
-			return entry->map;
-		}
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
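
The DRM_RENDER_ALLOW additions above are what make render nodes usable: drm_ioctl() now rejects any ioctl that is not explicitly render-allowed when called on a render client, and skips DRM_AUTH for the ones that are. Below is a hedged userspace sketch; the render-node path is only an example (render nodes were still behind a runtime enable at this point in the series) and the code relies solely on libdrm's existing drmPrimeFDToHandle().

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Open a render node and import a PRIME-shared buffer through it.  Both
 * DRM_IOCTL_GET_CAP and the PRIME fd<->handle ioctls are flagged
 * DRM_RENDER_ALLOW above, so no drmAuthMagic() dance is needed. */
static int import_on_render_node(int prime_fd, uint32_t *handle)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* example path */

	if (fd < 0)
		return -1;

	if (drmPrimeFDToHandle(fd, prime_fd, handle) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}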

+ 274 - 32
drivers/gpu/drm/drm_edid.c

@@ -125,6 +125,9 @@ static struct edid_quirk {
 
 	/* ViewSonic VA2026w */
 	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+	/* Medion MD 30217 PG */
+	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 };
 
 /*
@@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
 	 .vrefresh = 100, },
 };
 
+/*
+ * HDMI 1.4 4k modes.
+ */
+static const struct drm_display_mode edid_4k_modes[] = {
+	/* 1 - 3840x2160@30Hz */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+		   3840, 4016, 4104, 4400, 0,
+		   2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, },
+	/* 2 - 3840x2160@25Hz */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+		   3840, 4896, 4984, 5280, 0,
+		   2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, },
+	/* 3 - 3840x2160@24Hz */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+		   3840, 5116, 5204, 5500, 0,
+		   2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, },
+	/* 4 - 4096x2160@24Hz (SMPTE) */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
+		   4096, 5116, 5204, 5500, 0,
+		   2160, 2168, 2178, 2250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, },
+};
+
 /*** DDC fetch and block validation ***/
 
 static const u8 edid_header[] = {
@@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 	return closure.modes;
 }
 
-#define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
@@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define EDID_CEA_YCRCB422	(1 << 4)
 #define EDID_CEA_VCDB_QS	(1 << 6)
 
-/**
+/*
  * Search EDID for CEA extension block.
  */
-u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
 {
 	u8 *edid_ext = NULL;
 	int i;
@@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
 
 	return edid_ext;
 }
-EXPORT_SYMBOL(drm_find_cea_extension);
 
 /*
  * Calculate the alternate clock for the CEA mode
@@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 }
 EXPORT_SYMBOL(drm_match_cea_mode);
 
+/*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+ *
+ * It's almost like cea_mode_alternate_clock(); we just need to add an
+ * exception for the VIC 4 mode (4096x2160@24Hz), which has no alternate
+ * clock.
+ */
+static unsigned int
+hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
+{
+	if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
+		return hdmi_mode->clock;
+
+	return cea_mode_alternate_clock(hdmi_mode);
+}
+
+/*
+ * drm_match_hdmi_mode - look for an HDMI mode matching a given mode
+ * @to_match: display mode
+ *
+ * An HDMI mode is one defined in the HDMI vendor specific block.
+ *
+ * Returns the HDMI Video ID (VIC) of the mode or 0 if it isn't one.
+ */
+static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
+{
+	u8 mode;
+
+	if (!to_match->clock)
+		return 0;
+
+	for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
+		const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+		unsigned int clock1, clock2;
+
+		/* Make sure to also match alternate clocks */
+		clock1 = hdmi_mode->clock;
+		clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+			return mode + 1;
+	}
+	return 0;
+}
+
 static int
 add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
@@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 	 * with the alternate clock for certain CEA modes.
 	 */
 	list_for_each_entry(mode, &connector->probed_modes, head) {
-		const struct drm_display_mode *cea_mode;
+		const struct drm_display_mode *cea_mode = NULL;
 		struct drm_display_mode *newmode;
-		u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+		u8 mode_idx = drm_match_cea_mode(mode) - 1;
 		unsigned int clock1, clock2;
 
-		if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
-			continue;
+		if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+			cea_mode = &edid_cea_modes[mode_idx];
+			clock2 = cea_mode_alternate_clock(cea_mode);
+		} else {
+			mode_idx = drm_match_hdmi_mode(mode) - 1;
+			if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+				cea_mode = &edid_4k_modes[mode_idx];
+				clock2 = hdmi_mode_alternate_clock(cea_mode);
+			}
+		}
 
-		cea_mode = &edid_cea_modes[cea_mode_idx];
+		if (!cea_mode)
+			continue;
 
 		clock1 = cea_mode->clock;
-		clock2 = cea_mode_alternate_clock(cea_mode);
 
 		if (clock1 == clock2)
 			continue;
@@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 }
 
 static int
-do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 {
 	struct drm_device *dev = connector->dev;
-	u8 * mode, cea_mode;
+	const u8 *mode;
+	u8 cea_mode;
 	int modes = 0;
 
 	for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 	return modes;
 }
 
+/*
+ * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ * @len: length of the CEA block payload, i.e. one can access up to db[len]
+ *
+ * Parses the HDMI VSDB looking for modes to add to @connector.
+ */
+static int
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+	struct drm_device *dev = connector->dev;
+	int modes = 0, offset = 0, i;
+	u8 vic_len;
+
+	if (len < 8)
+		goto out;
+
+	/* no HDMI_Video_Present */
+	if (!(db[8] & (1 << 5)))
+		goto out;
+
+	/* Latency_Fields_Present */
+	if (db[8] & (1 << 7))
+		offset += 2;
+
+	/* I_Latency_Fields_Present */
+	if (db[8] & (1 << 6))
+		offset += 2;
+
+	/* the declared length must be long enough to cover the first 2 bytes
+	 * of the additional video format capabilities */
+	offset += 2;
+	if (len < (8 + offset))
+		goto out;
+
+	vic_len = db[8 + offset] >> 5;
+
+	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
+		struct drm_display_mode *newmode;
+		u8 vic;
+
+		vic = db[9 + offset + i];
+
+		vic--; /* VICs start at 1 */
+		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+			continue;
+		}
+
+		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+		if (!newmode)
+			continue;
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+out:
+	return modes;
+}
+
 static int
 cea_db_payload_len(const u8 *db)
 {
@@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
 	return 0;
 }
 
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 5)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI;
+}
+
 #define for_each_cea_db(cea, i, start, end) \
 	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
 static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
-	u8 * cea = drm_find_cea_extension(edid);
-	u8 * db, dbl;
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db;
+	u8 dbl;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2683,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 			dbl = cea_db_payload_len(db);
 
 			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes (connector, db+1, dbl);
+				modes += do_cea_modes(connector, db + 1, dbl);
+			else if (cea_db_is_hdmi_vsdb(db))
+				modes += do_hdmi_vsdb_modes(connector, db, dbl);
 		}
 	}
 
@@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
 		*(u8 **)data = t->data.other_data.data.str.str;
 }
 
-static bool cea_db_is_hdmi_vsdb(const u8 *db)
-{
-	int hdmi_id;
-
-	if (cea_db_tag(db) != VENDOR_BLOCK)
-		return false;
-
-	if (cea_db_payload_len(db) < 5)
-		return false;
-
-	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
-	return hdmi_id == HDMI_IDENTIFIER;
-}
-
 /**
  * drm_edid_to_eld - build ELD from EDID
  * @connector: connector corresponding to the HDMI/DP sink
@@ -2731,6 +2884,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
 }
 EXPORT_SYMBOL(drm_edid_to_sad);
 
+/**
+ * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
+ * @edid: EDID to parse
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for the CEA EDID extension block and extracts the Speaker Allocation Data
+ * Block from it.  Note: the returned pointer must be freed with kfree().
+ *
+ * Returns the number of Speaker Allocation Blocks found or a negative number on error.
+ */
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+{
+	int count = 0;
+	int i, start, end, dbl;
+	const u8 *cea;
+
+	cea = drm_find_cea_extension(edid);
+	if (!cea) {
+		DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+		return -ENOENT;
+	}
+
+	if (cea_revision(cea) < 3) {
+		DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+		return -ENOTSUPP;
+	}
+
+	if (cea_db_offsets(cea, &start, &end)) {
+		DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+		return -EPROTO;
+	}
+
+	for_each_cea_db(cea, i, start, end) {
+		const u8 *db = &cea[i];
+
+		if (cea_db_tag(db) == SPEAKER_BLOCK) {
+			dbl = cea_db_payload_len(db);
+
+			/* Speaker Allocation Data Block */
+			if (dbl == 3) {
+				*sadb = kmalloc(dbl, GFP_KERNEL);
+				if (!*sadb)
+					return -ENOMEM;
+				memcpy(*sadb, &db[1], dbl);
+				count = dbl;
+				break;
+			}
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
+
 /**
  * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
  * @connector: connector associated with the HDMI/DP sink
@@ -3102,9 +3307,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	if (err < 0)
 		return err;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		frame->pixel_repeat = 1;
+
 	frame->video_code = drm_match_cea_mode(mode);
-	if (!frame->video_code)
-		return 0;
 
 	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@@ -3112,3 +3318,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 	return 0;
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+
+/**
+ * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI vendor infoframe
+ * @mode: DRM display mode
+ *
+ * Note that an HDMI vendor infoframe is only needed when using a 4k or
+ * stereoscopic 3D mode. For any other mode this function will return -EINVAL,
+ * an error that can be safely ignored.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+					    const struct drm_display_mode *mode)
+{
+	int err;
+	u8 vic;
+
+	if (!frame || !mode)
+		return -EINVAL;
+
+	vic = drm_match_hdmi_mode(mode);
+	if (!vic)
+		return -EINVAL;
+
+	err = hdmi_vendor_infoframe_init(frame);
+	if (err < 0)
+		return err;
+
+	frame->vic = vic;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
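
A hedged sketch of how an HDMI encoder might consume the new helper, mirroring the note above that -EINVAL simply means "no vendor infoframe needed for this mode". my_hdmi_write_infoframe() is a hypothetical stand-in for the driver's hardware-specific infoframe write path.

#include <linux/hdmi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

/* Hypothetical hardware hook: pack the frame and write it out through the
 * encoder's infoframe registers (driver specific, left empty here). */
static void my_hdmi_write_infoframe(struct drm_encoder *encoder,
				    struct hdmi_vendor_infoframe *frame)
{
}

static void my_hdmi_set_vendor_infoframe(struct drm_encoder *encoder,
					 const struct drm_display_mode *mode)
{
	struct hdmi_vendor_infoframe frame;

	/* Only 4k (HDMI VIC) modes produce a vendor infoframe here; -EINVAL
	 * for everything else is expected and ignored, per the comment above. */
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	my_hdmi_write_infoframe(encoder, &frame);
}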

+ 2 - 3
drivers/gpu/drm/drm_fb_cma_helper.c

@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
 #ifdef CONFIG_DEBUG_FS
-/**
+/*
  * drm_fb_cma_describe() - Helper to dump information about a single
  * CMA framebuffer object
  */
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
 	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
 	int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 		drm_gem_cma_describe(fb_cma->obj[i], m);
 	}
 }
-EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
 
 /**
  * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects

+ 124 - 0
drivers/gpu/drm/drm_flip_work.c

@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_flip_work.h"
+
+/**
+ * drm_flip_work_queue - queue work
+ * @work: the flip-work
+ * @val: the value to queue
+ *
+ * Queues work that will later be run (passed back to the drm_flip_func_t
+ * func) on a work queue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue(struct drm_flip_work *work, void *val)
+{
+	if (kfifo_put(&work->fifo, (const void **)&val)) {
+		atomic_inc(&work->pending);
+	} else {
+		DRM_ERROR("%s fifo full!\n", work->name);
+		work->func(work, val);
+	}
+}
+EXPORT_SYMBOL(drm_flip_work_queue);
+
+/**
+ * drm_flip_work_commit - commit queued work
+ * @work: the flip-work
+ * @wq: the work-queue to run the queued work on
+ *
+ * Trigger work previously queued by drm_flip_work_queue() to run
+ * on a workqueue.  The typical usage would be to queue work (via
+ * drm_flip_work_queue()) at any point (from vblank irq and/or
+ * prior), and then from vblank irq commit the queued work.
+ */
+void drm_flip_work_commit(struct drm_flip_work *work,
+		struct workqueue_struct *wq)
+{
+	uint32_t pending = atomic_read(&work->pending);
+	atomic_add(pending, &work->count);
+	atomic_sub(pending, &work->pending);
+	queue_work(wq, &work->worker);
+}
+EXPORT_SYMBOL(drm_flip_work_commit);
+
+static void flip_worker(struct work_struct *w)
+{
+	struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+	uint32_t count = atomic_read(&work->count);
+	void *val = NULL;
+
+	atomic_sub(count, &work->count);
+
+	while (count--)
+		if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+			work->func(work, val);
+}
+
+/**
+ * drm_flip_work_init - initialize flip-work
+ * @work: the flip-work to initialize
+ * @size: the max queue depth
+ * @name: debug name
+ * @func: the callback work function
+ *
+ * Initializes/allocates resources for the flip-work
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+		const char *name, drm_flip_func_t func)
+{
+	int ret;
+
+	work->name = name;
+	atomic_set(&work->count, 0);
+	atomic_set(&work->pending, 0);
+	work->func = func;
+
+	ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("could not allocate %s fifo\n", name);
+		return ret;
+	}
+
+	INIT_WORK(&work->worker, flip_worker);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_flip_work_init);
+
+/**
+ * drm_flip_work_cleanup - cleans up flip-work
+ * @work: the flip-work to cleanup
+ *
+ * Destroy resources allocated for the flip-work
+ */
+void drm_flip_work_cleanup(struct drm_flip_work *work)
+{
+	WARN_ON(!kfifo_is_empty(&work->fifo));
+	kfifo_free(&work->fifo);
+}
+EXPORT_SYMBOL(drm_flip_work_cleanup);
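
The kerneldoc above describes the intended pattern: queue values from anywhere (including the vblank irq) and commit from the irq once the flip has completed. A hedged sketch of that pattern follows; the my_crtc/my_unref_worker names are hypothetical, and the callback signature is taken from how work->func is invoked in flip_worker() above.

#include <linux/workqueue.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>

struct my_crtc {
	struct drm_crtc base;
	struct drm_flip_work unref_work;
};

/* Runs on the workqueue passed to drm_flip_work_commit(), i.e. in process
 * context, where dropping the last framebuffer reference is safe. */
static void my_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static int my_crtc_init_unref_work(struct my_crtc *c)
{
	/* up to 16 queued unrefs; "unref" is only a debug name */
	return drm_flip_work_init(&c->unref_work, 16, "unref", my_unref_worker);
}

/* At page-flip time: queue the framebuffer being replaced... */
static void my_crtc_queue_unref(struct my_crtc *c, struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&c->unref_work, old_fb);
}

/* ...and once the flip has completed (vblank irq), kick the queued work. */
static void my_crtc_flip_done(struct my_crtc *c)
{
	drm_flip_work_commit(&c->unref_work, system_wq);
}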

+ 15 - 83
drivers/gpu/drm/drm_fops.c

@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(struct drm_device * dev)
 {
-	int i;
 	int ret;
 
-	if (dev->driver->firstopen) {
+	if (dev->driver->firstopen &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = dev->driver->firstopen(dev);
 		if (ret != 0)
 			return ret;
 	}
 
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev->buf_use = 0;
-		atomic_set(&dev->buf_alloc, 0);
-
-		i = drm_dma_setup(dev);
-		if (i < 0)
-			return i;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->interrupt_flag = 0;
-	dev->dma_flag = 0;
-	dev->last_context = 0;
-	dev->last_switch = 0;
-	dev->last_checked = 0;
-	init_waitqueue_head(&dev->context_wait);
-	dev->if_version = 0;
-
-	dev->ctx_start = 0;
-	dev->lck_start = 0;
+	ret = drm_legacy_dma_setup(dev);
+	if (ret < 0)
+		return ret;
 
-	dev->buf_async = NULL;
-	init_waitqueue_head(&dev->buf_readers);
-	init_waitqueue_head(&dev->buf_writers);
 
 	DRM_DEBUG("\n");
-
-	/*
-	 * The kernel's context could be created here, but is now created
-	 * in drm_dma_enqueue.  This is more resource-efficient for
-	 * hardware that does not do DMA, but may mean that
-	 * drm_select_queue fails between the time the interrupt is
-	 * initialized and the time the queues are initialized.
-	 */
-
 	return 0;
 }
 
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 		return -EBUSY;	/* No exclusive opens */
 	if (!drm_cpu_valid())
 		return -EINVAL;
-	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return -EINVAL;
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 			goto out_prime_destroy;
 	}
 
-
-	/* if there is no current master make this fd it */
+	/* if there is no current master, make this fd the master, but do not
+	 * create any master object for render clients */
 	mutex_lock(&dev->struct_mutex);
-	if (!priv->minor->master) {
+	if (!priv->minor->master && !drm_is_render_client(priv)) {
 		/* create a new master */
 		priv->minor->master = drm_master_create(priv->minor);
 		if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 				goto out_close;
 			}
 		}
-		mutex_unlock(&dev->struct_mutex);
-	} else {
+	} else if (!drm_is_render_client(priv)) {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
-		mutex_unlock(&dev->struct_mutex);
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	mutex_lock(&dev->struct_mutex);
 	list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
 	return ret;
 }
 
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-
-	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
-		  (long)old_encode_dev(priv->minor->device));
-	return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
 static void drm_master_release(struct drm_device *dev, struct file *filp)
 {
 	struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
-	mutex_lock(&dev->ctxlist_mutex);
-	if (!list_empty(&dev->ctxlist)) {
-		struct drm_ctx_list *pos, *n;
-
-		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-			if (pos->tag == file_priv &&
-			    pos->handle != DRM_KERNEL_CONTEXT) {
-				if (dev->driver->context_dtor)
-					dev->driver->context_dtor(dev,
-								  pos->handle);
-
-				drm_ctxbitmap_free(dev, pos->handle);
-
-				list_del(&pos->head);
-				kfree(pos);
-				--dev->ctx_count;
-			}
-		}
-	}
-	mutex_unlock(&dev->ctxlist_mutex);
+	drm_legacy_ctxbitmap_release(dev, file_priv);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	iput(container_of(dev->dev_mapping, struct inode, i_data));
 
 	/* drop the reference held by the file priv */
-	drm_master_put(&file_priv->master);
+	if (file_priv->master)
+		drm_master_put(&file_priv->master);
 	file_priv->is_master = 0;
 	list_del(&file_priv->lhead);
 	mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, file_priv);
 
+
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_prime_destroy_file_private(&file_priv->prime);
 

+ 297 - 143
drivers/gpu/drm/drm_gem.c

@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm;
 
-	spin_lock_init(&dev->object_name_lock);
+	mutex_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
 	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
 	}
 
 	dev->mm_private = mm;
-
-	if (drm_ht_create(&mm->offset_hash, 12)) {
-		kfree(mm);
-		return -ENOMEM;
-	}
-
-	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-		    DRM_FILE_PAGE_OFFSET_SIZE);
+	drm_vma_offset_manager_init(&mm->vma_manager,
+				    DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE);
 
 	return 0;
 }
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_mm_takedown(&mm->offset_manager);
-	drm_ht_remove(&mm->offset_hash);
+	drm_vma_offset_manager_destroy(&mm->vma_manager);
 	kfree(mm);
 	dev->mm_private = NULL;
 }
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
-	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+	struct file *filp;
 
-	obj->dev = dev;
-	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-	if (IS_ERR(obj->filp))
-		return PTR_ERR(obj->filp);
+	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
 
-	kref_init(&obj->refcount);
-	atomic_set(&obj->handle_count, 0);
-	obj->size = size;
+	drm_gem_private_object_init(dev, obj, size);
+	obj->filp = filp;
 
 	return 0;
 }
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
-int drm_gem_private_object_init(struct drm_device *dev,
-			struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+				 struct drm_gem_object *obj, size_t size)
 {
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
 	obj->filp = NULL;
 
 	kref_init(&obj->refcount);
-	atomic_set(&obj->handle_count, 0);
+	obj->handle_count = 0;
 	obj->size = size;
-
-	return 0;
+	drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
-	if (obj->import_attach) {
-		drm_prime_remove_buf_handle(&filp->prime,
-				obj->import_attach->dmabuf);
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	mutex_lock(&filp->prime.lock);
+	if (obj->dma_buf) {
+		drm_prime_remove_buf_handle_locked(&filp->prime,
+						   obj->dma_buf);
 	}
-	if (obj->export_dma_buf) {
-		drm_prime_remove_buf_handle(&filp->prime,
-				obj->export_dma_buf);
+	mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
+		 */
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	}
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (WARN_ON(obj->handle_count == 0))
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+
+	mutex_lock(&obj->dev->object_name_lock);
+	if (--obj->handle_count == 0) {
+		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
+	}
+	mutex_unlock(&obj->dev->object_name_lock);
+
+	drm_gem_object_unreference_unlocked(obj);
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	drm_gem_remove_prime_handles(obj, filp);
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, filp);
+	drm_vma_node_revoke(&obj->vma_node, filp->filp);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ * 
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ * 
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-		       struct drm_gem_object *obj,
-		       u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+			   struct drm_gem_object *obj,
+			   u32 *handlep)
 {
 	struct drm_device *dev = obj->dev;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
 	/*
 	 * Get the user-visible handle using idr.  Preload and perform
 	 * allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
 	spin_lock(&file_priv->table_lock);
 
 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+	drm_gem_object_reference(obj);
+	obj->handle_count++;
 	spin_unlock(&file_priv->table_lock);
 	idr_preload_end();
-	if (ret < 0)
+	mutex_unlock(&dev->object_name_lock);
+	if (ret < 0) {
+		drm_gem_object_handle_unreference_unlocked(obj);
 		return ret;
+	}
 	*handlep = ret;
 
-	drm_gem_object_handle_reference(obj);
+	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+	if (ret) {
+		drm_gem_handle_delete(file_priv, *handlep);
+		return ret;
+	}
 
 	if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
 	return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       u32 *handlep)
+{
+	mutex_lock(&obj->dev->object_name_lock);
+
+	return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list = &obj->map_list;
 
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-	drm_mm_put_block(list->file_offset_node);
-	kfree(list->map);
-	list->map = NULL;
+	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
 /**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
  * @obj: obj in question
+ * @size: the virtual size
  *
  * GEM memory mapping works by handing back to userspace a fake mmap offset
  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
  * up the object based on the offset and sets up the various memory mapping
  * structures.
  *
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
+ * just use drm_gem_create_mmap_offset().
  */
 int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret;
 
-	/* Set the object up for mmap'ing */
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (!list->map)
-		return -ENOMEM;
-
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
+	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+				  size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 
-	/* Get a DRM GEM mmap offset allocated... */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-			obj->size / PAGE_SIZE, 0, false);
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
-	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOSPC;
-		goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * a driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB.  If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue.  But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
 	}
 
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-			obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto out_free_list;
-	}
+	return pages;
 
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		DRM_ERROR("failed to add to map hash\n");
-		goto out_free_mm;
-	}
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
 
-	return 0;
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
 
-out_free_mm:
-	drm_mm_put_block(list->file_offset_node);
-out_free_list:
-	kfree(list->map);
-	list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
 
-	return ret;
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * a driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
 }
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
+	mutex_lock(&dev->object_name_lock);
 	idr_preload(GFP_KERNEL);
-	spin_lock(&dev->object_name_lock);
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		ret = -ENOENT;
+		goto err;
+	}
+
 	if (!obj->name) {
 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
 		if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	ret = 0;
 
 err:
-	spin_unlock(&dev->object_name_lock);
 	idr_preload_end();
+	mutex_unlock(&dev->object_name_lock);
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	spin_lock(&dev->object_name_lock);
+	mutex_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
-	if (obj)
+	if (obj) {
 		drm_gem_object_reference(obj);
-	spin_unlock(&dev->object_name_lock);
-	if (!obj)
+	} else {
+		mutex_unlock(&dev->object_name_lock);
 		return -ENOENT;
+	}
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
-	drm_gem_remove_prime_handles(obj, file_priv);
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, file_priv);
+	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+	WARN_ON(obj->dma_buf);
+
 	if (obj->filp)
 	    fput(obj->filp);
 }
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-
-	/* Remove any name for this object */
-	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		idr_remove(&dev->object_name_idr, obj->name);
-		obj->name = 0;
-		spin_unlock(&dev->object_name_lock);
-		/*
-		 * The object name held a reference to this object, drop
-		 * that now.
-		*
-		* This cannot be the last reference, since the handle holds one too.
-		 */
-		kref_put(&obj->refcount, drm_gem_object_ref_bug);
-	} else
-		spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * the GEM object is not looked up based on its fake offset. To implement the
  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
  *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
  * NOTE: This function has to be protected with dev->struct_mutex
  *
  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
  * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_local_map *map = NULL;
-	struct drm_hash_item *hash;
+	struct drm_gem_object *obj;
+	struct drm_vma_offset_node *node;
 	int ret = 0;
 
 	if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+					   vma_pages(vma));
+	if (!node) {
 		mutex_unlock(&dev->struct_mutex);
 		return drm_mmap(filp, vma);
+	} else if (!drm_vma_node_is_allowed(node, filp)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EACCES;
 	}
 
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-	if (!map ||
-	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-		ret =  -EPERM;
-		goto out_unlock;
-	}
-
-	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+	obj = container_of(node, struct drm_gem_object, vma_node);
+	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
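
A hedged sketch of how a shmem-backed driver might use the two new page helpers above when pinning and unpinning an object's backing storage; struct my_bo and its functions are hypothetical.

#include <linux/err.h>
#include <drm/drmP.h>

struct my_bo {
	struct drm_gem_object base;
	struct page **pages;
};

static int my_bo_pin_pages(struct my_bo *bo)
{
	struct page **pages;

	/* obj->size is page aligned (BUG_ON in drm_gem_object_init()), so the
	 * helper hands back obj->size >> PAGE_SHIFT pages or an ERR_PTR. */
	pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

static void my_bo_unpin_pages(struct my_bo *bo)
{
	/* mark the pages dirty and accessed so their contents survive swap */
	drm_gem_put_pages(&bo->base, bo->pages, true, true);
	bo->pages = NULL;
}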

+ 5 - 21
drivers/gpu/drm/drm_gem_cma_helper.c

@@ -27,11 +27,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
-	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
 
 /*
  * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_gem_cma_object *cma_obj;
 
-	if (gem_obj->map_list.map)
-		drm_gem_free_mmap_offset(gem_obj);
+	drm_gem_free_mmap_offset(gem_obj);
 
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 		return -EINVAL;
 	}
 
-	*offset = get_gem_mmap_offset(gem_obj);
+	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
 	drm_gem_object_unreference(gem_obj);
 
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
-		struct drm_device *drm, unsigned int handle)
-{
-	return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
 #ifdef CONFIG_DEBUG_FS
 void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
 {
 	struct drm_gem_object *obj = &cma_obj->base;
 	struct drm_device *dev = obj->dev;
-	uint64_t off = 0;
+	uint64_t off;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (obj->map_list.map)
-		off = (uint64_t)obj->map_list.hash.key;
+	off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
 			obj->name, obj->refcount.refcount.counter,
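
With drm_gem_cma_dumb_destroy() removed, CMA-based drivers are expected to point .dumb_destroy at the generic drm_gem_dumb_destroy() helper added in drm_gem.c above. A partial, hedged sketch of the relevant drm_driver fields (my_driver is hypothetical, and a real driver still needs .fops, .name and the rest):

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
};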

+ 5 - 1
drivers/gpu/drm/drm_info.c

@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
 
 	seq_printf(m, "%6d %8zd %7d %8d\n",
 		   obj->name, obj->size,
-		   atomic_read(&obj->handle_count),
+		   obj->handle_count,
 		   atomic_read(&obj->refcount.refcount));
 	return 0;
 }
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 
 	seq_printf(m, "  name     size handles refcount\n");
+
+	mutex_lock(&dev->object_name_lock);
 	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+	mutex_unlock(&dev->object_name_lock);
+
 	return 0;
 }
 

+ 26 - 36
drivers/gpu/drm/drm_ioctl.c

@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
 	struct drm_client *client = data;
-	struct drm_file *pt;
-	int idx;
-	int i;
 
-	idx = client->idx;
-	i = 0;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(pt, &dev->filelist, lhead) {
-		if (i++ >= idx) {
-			client->auth = pt->authenticated;
-			client->pid = pid_vnr(pt->pid);
-			client->uid = from_kuid_munged(current_user_ns(), pt->uid);
-			client->magic = pt->magic;
-			client->iocs = pt->ioctl_count;
-			mutex_unlock(&dev->struct_mutex);
-
-			return 0;
-		}
+	/*
+	 * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+	 * from breaking completely. Userspace tools stop enumerating once they
+	 * get -EINVAL, hence this is the return value we need to hand back for
+	 * no clients tracked.
+	 *
+	 * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+	 * attempt to figure out whether they're authenticated or not. Since
+	 * that's the only thing they care about, give it to them directly
+	 * instead of walking one giant list.
+	 */
+	if (client->idx == 0) {
+		client->auth = file_priv->authenticated;
+		client->pid = pid_vnr(file_priv->pid);
+		client->uid = from_kuid_munged(current_user_ns(),
+					       file_priv->uid);
+		client->magic = 0;
+		client->iocs = 0;
+
+		return 0;
+	} else {
+		return -EINVAL;
 	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return -EINVAL;
 }
 
 /**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
 		 struct drm_file *file_priv)
 {
 	struct drm_stats *stats = data;
-	int i;
 
+	/* Clear stats to prevent userspace from eating its stack garbage. */
 	memset(stats, 0, sizeof(*stats));
 
-	for (i = 0; i < dev->counters; i++) {
-		if (dev->types[i] == _DRM_STAT_LOCK)
-			stats->data[i].value =
-			    (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
-		else
-			stats->data[i].value = atomic_read(&dev->counts[i]);
-		stats->data[i].type = dev->types[i];
-	}
-
-	stats->count = dev->counters;
-
 	return 0;
 }
 
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	case DRM_CAP_TIMESTAMP_MONOTONIC:
 		req->value = drm_timestamp_monotonic;
 		break;
+	case DRM_CAP_ASYNC_PAGE_FLIP:
+		req->value = dev->mode_config.async_page_flip;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
 			retcode = -EINVAL;
 			goto done;
 		}
-
-		if (dev->driver->set_version)
-			dev->driver->set_version(dev, sv);
 	}
 
 done:

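The getcap ioctl above now also reports DRM_CAP_ASYNC_PAGE_FLIP, so userspace can probe for async flip support before requesting one. A minimal sketch using libdrm's drmGetCap(), assuming a libdrm/drm.h that already carries the new capability constant and the usual /dev/dri/card0 node:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	/* Path is illustrative; use the card node of the device you drive. */
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	uint64_t cap = 0;

	if (fd < 0)
		return 1;

	/* Ends up in drm_getcap() above, which returns
	 * dev->mode_config.async_page_flip. */
	if (drmGetCap(fd, DRM_CAP_ASYNC_PAGE_FLIP, &cap) == 0 && cap)
		printf("async page flips supported\n");
	else
		printf("async page flips not supported\n");

	close(fd);
	return 0;
}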
+ 0 - 2
drivers/gpu/drm/drm_memory.c

@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
 	agp_free_memory(handle);
 }
-EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
 	return agp_unbind_memory(handle);
 }
-EXPORT_SYMBOL(drm_unbind_agp);
 
 #else  /*  __OS_HAS_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,

+ 47 - 182
drivers/gpu/drm/drm_mm.c

@@ -49,58 +49,18 @@
 
 #define MM_UNUSED_TARGET 4
 
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
-	struct drm_mm_node *child;
-
-	if (atomic)
-		child = kzalloc(sizeof(*child), GFP_ATOMIC);
-	else
-		child = kzalloc(sizeof(*child), GFP_KERNEL);
-
-	if (unlikely(child == NULL)) {
-		spin_lock(&mm->unused_lock);
-		if (list_empty(&mm->unused_nodes))
-			child = NULL;
-		else {
-			child =
-			    list_entry(mm->unused_nodes.next,
-				       struct drm_mm_node, node_list);
-			list_del(&child->node_list);
-			--mm->num_unused;
-		}
-		spin_unlock(&mm->unused_lock);
-	}
-	return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm:	memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
-	struct drm_mm_node *node;
-
-	spin_lock(&mm->unused_lock);
-	while (mm->num_unused < MM_UNUSED_TARGET) {
-		spin_unlock(&mm->unused_lock);
-		node = kzalloc(sizeof(*node), GFP_KERNEL);
-		spin_lock(&mm->unused_lock);
-
-		if (unlikely(node == NULL)) {
-			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
-			spin_unlock(&mm->unused_lock);
-			return ret;
-		}
-		++mm->num_unused;
-		list_add_tail(&node->node_list, &mm->unused_nodes);
-	}
-	spin_unlock(&mm->unused_lock);
-	return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						unsigned long start,
+						unsigned long end,
+						enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole, *node;
-	unsigned long end = start + size;
+	struct drm_mm_node *hole;
+	unsigned long end = node->start + node->size;
 	unsigned long hole_start;
 	unsigned long hole_end;
 
+	BUG_ON(node == NULL);
+
+	/* Find the relevant hole to add our node to */
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > start || hole_end < end)
+		if (hole_start > node->start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
-		node->start = start;
-		node->size = size;
 		node->mm = mm;
 		node->allocated = 1;
 
 		INIT_LIST_HEAD(&node->hole_stack);
 		list_add(&node->node_list, &hole->node_list);
 
-		if (start == hole_start) {
+		if (node->start == hole_start) {
 			hole->hole_follows = 0;
 			list_del_init(&hole->hole_stack);
 		}
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
-	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_mm_create_block);
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
-					     unsigned long size,
-					     unsigned alignment,
-					     unsigned long color,
-					     int atomic)
-{
-	struct drm_mm_node *node;
-
-	node = drm_mm_kmalloc(hole_node->mm, atomic);
-	if (unlikely(node == NULL))
-		return NULL;
-
-	drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
-	return node;
+	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+	     node->start, node->size);
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
  * Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 			       unsigned long size, unsigned alignment,
-			       unsigned long color)
+			       unsigned long color,
+			       enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
-					       color, 0);
+					       color, flags);
 	if (!hole_node)
 		return -ENOSPC;
 
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-		       unsigned long size, unsigned alignment)
-{
-	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
 				       unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long color,
-						unsigned long start,
-						unsigned long end,
-						int atomic)
-{
-	struct drm_mm_node *node;
-
-	node = drm_mm_kmalloc(hole_node->mm, atomic);
-	if (unlikely(node == NULL))
-		return NULL;
-
-	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
-				   start, end);
-
-	return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
 					unsigned long size, unsigned alignment, unsigned long color,
-					unsigned long start, unsigned long end)
+					unsigned long start, unsigned long end,
+					enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_in_range_generic(mm,
 							size, alignment, color,
-							start, end, 0);
+							start, end, flags);
 	if (!hole_node)
 		return -ENOSPC;
 
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-				unsigned long size, unsigned alignment,
-				unsigned long start, unsigned long end)
-{
-	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
 /**
  * Remove a memory node from the allocator.
  */
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
+	if (WARN_ON(!node->allocated))
+		return;
+
 	BUG_ON(node->scanned_block || node->scanned_prev_free
 				   || node->scanned_next_free);
 
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
-	struct drm_mm *mm = node->mm;
-
-	drm_mm_remove_node(node);
-
-	spin_lock(&mm->unused_lock);
-	if (mm->num_unused < MM_UNUSED_TARGET) {
-		list_add(&node->node_list, &mm->unused_nodes);
-		++mm->num_unused;
-	} else
-		kfree(node);
-	spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
 static int check_free_hole(unsigned long start, unsigned long end,
 			   unsigned long size, unsigned alignment)
 {
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
 	return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-					       unsigned long size,
-					       unsigned alignment,
-					       unsigned long color,
-					       bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+						      unsigned long size,
+						      unsigned alignment,
+						      unsigned long color,
+						      enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
-		if (!best_match)
+		if (!(flags & DRM_MM_SEARCH_BEST))
 			return entry;
 
 		if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 							unsigned long size,
 							unsigned alignment,
 							unsigned long color,
 							unsigned long start,
 							unsigned long end,
-							bool best_match)
+							enum drm_mm_search_flags flags)
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
-		if (!best_match)
+		if (!(flags & DRM_MM_SEARCH_BEST))
 			return entry;
 
 		if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 
 	return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
 /**
  * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * corrupted.
  *
  * When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the free_stack list).
  *
  * Returns one if this block should be evicted, zero otherwise. Will always
  * return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->hole_stack);
-	INIT_LIST_HEAD(&mm->unused_nodes);
-	mm->num_unused = 0;
 	mm->scanned_blocks = 0;
-	spin_lock_init(&mm->unused_lock);
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct drm_mm_node *entry, *next;
-
-	if (WARN(!list_empty(&mm->head_node.node_list),
-		 "Memory manager not clean. Delaying takedown\n")) {
-		return;
-	}
-
-	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
-		list_del(&entry->node_list);
-		kfree(entry);
-		--mm->num_unused;
-	}
-	spin_unlock(&mm->unused_lock);
-
-	BUG_ON(mm->num_unused != 0);
+	WARN(!list_empty(&mm->head_node.node_list),
+	     "Memory manager not clean during takedown.\n");
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 

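With the kmalloc-based get_block/put_block API gone, callers now embed struct drm_mm_node in their own objects and drive the allocator directly. A sketch of the new flow, based only on the signatures visible above; my_buffer and the pin/unpin helpers are illustrative and assume a zero-initialized (e.g. kzalloc'd) container:

#include <drm/drm_mm.h>

struct my_buffer {
	struct drm_mm_node node;	/* embedded, no separate allocation */
	/* ... driver-private state ... */
};

/* Let the allocator pick a hole: first fit with DRM_MM_SEARCH_DEFAULT,
 * smallest fitting hole with DRM_MM_SEARCH_BEST. */
static int my_buffer_pin(struct drm_mm *mm, struct my_buffer *buf,
			 unsigned long size)
{
	return drm_mm_insert_node_generic(mm, &buf->node, size,
					  0 /* alignment */, 0 /* color */,
					  DRM_MM_SEARCH_DEFAULT);
}

/* Claim a fixed range, e.g. memory already scanned out by firmware. */
static int my_buffer_pin_at(struct drm_mm *mm, struct my_buffer *buf,
			    unsigned long start, unsigned long size)
{
	buf->node.start = start;
	buf->node.size = size;
	return drm_mm_reserve_node(mm, &buf->node);
}

static void my_buffer_unpin(struct my_buffer *buf)
{
	/* Only unlinks the node; the memory stays with my_buffer. */
	drm_mm_remove_node(&buf->node);
}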
+ 0 - 58
drivers/gpu/drm/drm_modes.c

@@ -595,27 +595,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_set_name);
 
-/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
-	struct list_head *entry, *tmp;
-
-	list_for_each_safe(entry, tmp, head) {
-		list_move_tail(entry, new);
-	}
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
 /**
  * drm_mode_width - get the width of a mode
  * @mode: mode
@@ -922,43 +901,6 @@ void drm_mode_validate_size(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_validate_size);
 
-/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question.  This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
-			      struct list_head *mode_list,
-			      int *min, int *max, int n_ranges)
-{
-	struct drm_display_mode *mode;
-	int i;
-
-	list_for_each_entry(mode, mode_list, head) {
-		bool good = false;
-		for (i = 0; i < n_ranges; i++) {
-			if (mode->clock >= min[i] && mode->clock <= max[i]) {
-				good = true;
-				break;
-			}
-		}
-		if (!good)
-			mode->status = MODE_CLOCK_RANGE;
-	}
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
 /**
  * drm_mode_prune_invalid - remove invalid modes from mode list
  * @dev: DRM device

+ 25 - 10
drivers/gpu/drm/drm_pci.c

@@ -52,10 +52,8 @@
 drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 {
 	drm_dma_handle_t *dmah;
-#if 1
 	unsigned long addr;
 	size_t sz;
-#endif
 
 	/* pci_alloc_consistent only guarantees alignment to the smallest
 	 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
  */
 void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
-#if 1
 	unsigned long addr;
 	size_t sz;
-#endif
 
 	if (dmah->vaddr) {
 		/* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
 			DRM_ERROR("Cannot initialize the agpgart module.\n");
 			return -EINVAL;
 		}
-		if (drm_core_has_MTRR(dev)) {
-			if (dev->agp)
-				dev->agp->agp_mtrr = arch_phys_wc_add(
-					dev->agp->agp_info.aper_base,
-					dev->agp->agp_info.aper_size *
-					1024 * 1024);
+		if (dev->agp) {
+			dev->agp->agp_mtrr = arch_phys_wc_add(
+				dev->agp->agp_info.aper_base,
+				dev->agp->agp_info.aper_size *
+				1024 * 1024);
 		}
 	}
 	return 0;
 }
 
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		arch_phys_wc_del(dev->agp->agp_mtrr);
+		drm_agp_clear(dev);
+		drm_agp_destroy(dev->agp);
+		dev->agp = NULL;
+	}
+}
+
 static struct drm_bus drm_pci_bus = {
 	.bus_type = DRIVER_BUS_PCI,
 	.get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
 	.set_unique = drm_pci_set_unique,
 	.irq_by_busid = drm_pci_irq_by_busid,
 	.agp_init = drm_pci_agp_init,
+	.agp_destroy = drm_pci_agp_destroy,
 };
 
 /**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 			goto err_g2;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_g21;
+	}
+
 	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
 		goto err_g3;
 
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 err_g4:
 	drm_put_minor(&dev->primary);
 err_g3:
+	if (dev->render)
+		drm_put_minor(&dev->render);
+err_g21:
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
 err_g2:

+ 12 - 4
drivers/gpu/drm/drm_platform.c

@@ -28,7 +28,7 @@
 #include <linux/export.h>
 #include <drm/drmP.h>
 
-/**
+/*
  * Register.
  *
 * \param platdev - Platform device structure
@@ -39,8 +39,8 @@
  * Try and register, if we fail to register, backout previous work.
  */
 
-int drm_get_platform_dev(struct platform_device *platdev,
-			 struct drm_driver *driver)
+static int drm_get_platform_dev(struct platform_device *platdev,
+				struct drm_driver *driver)
 {
 	struct drm_device *dev;
 	int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
 			goto err_g1;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_g11;
+	}
+
 	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
 	if (ret)
 		goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
 err_g3:
 	drm_put_minor(&dev->primary);
 err_g2:
+	if (dev->render)
+		drm_put_minor(&dev->render);
+err_g11:
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
 err_g1:
@@ -107,7 +116,6 @@ err_g1:
 	mutex_unlock(&drm_global_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(drm_get_platform_dev);
 
 static int drm_platform_get_irq(struct drm_device *dev)
 {

+ 121 - 69
drivers/gpu/drm/drm_prime.c

@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
 	return 0;
 }
 
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+						      uint32_t handle)
+{
+	struct drm_prime_member *member;
+
+	list_for_each_entry(member, &prime_fpriv->head, entry) {
+		if (member->handle == handle)
+			return member->dma_buf;
+	}
+
+	return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+				       struct dma_buf *dma_buf,
+				       uint32_t *handle)
+{
+	struct drm_prime_member *member;
+
+	list_for_each_entry(member, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			*handle = member->handle;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
 static int drm_gem_map_attach(struct dma_buf *dma_buf,
 			      struct device *target_dev,
 			      struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
 	attach->priv = NULL;
 }
 
-static void drm_prime_remove_buf_handle_locked(
-		struct drm_prime_file_private *prime_fpriv,
-		struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+					struct dma_buf *dma_buf)
 {
 	struct drm_prime_member *member, *safe;
 
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 	if (WARN_ON(prime_attach->dir != DMA_NONE))
 		return ERR_PTR(-EBUSY);
 
-	mutex_lock(&obj->dev->struct_mutex);
-
 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
 	if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 		}
 	}
 
-	mutex_unlock(&obj->dev->struct_mutex);
 	return sgt;
 }
 
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 	/* nothing to be done here */
 }
 
-static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
 
-	if (obj->export_dma_buf == dma_buf) {
-		/* drop the reference on the export fd holds */
-		obj->export_dma_buf = NULL;
-		drm_gem_object_unreference_unlocked(obj);
-	}
+	/* drop the reference on the export fd holds */
+	drm_gem_object_unreference_unlocked(obj);
 }
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
 static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+						  struct drm_gem_object *obj,
+						  uint32_t flags)
+{
+	struct dma_buf *dmabuf;
+
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		dmabuf = ERR_PTR(-ENOENT);
+		return dmabuf;
+	}
+
+	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	if (IS_ERR(dmabuf)) {
+		/* normally the created dma-buf takes ownership of the ref,
+		 * but if that fails then drop the ref
+		 */
+		return dmabuf;
+	}
+
+	/*
+	 * Note that callers do not need to clean up the export cache
+	 * since the check for obj->handle_count guarantees that someone
+	 * will clean it up.
+	 */
+	obj->dma_buf = dmabuf;
+	get_dma_buf(obj->dma_buf);
+	/* Grab a new ref since the caller's ref is now used by the dma-buf */
+	drm_gem_object_reference(obj);
+
+	return dmabuf;
+}
+
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 		int *prime_fd)
 {
 	struct drm_gem_object *obj;
-	void *buf;
 	int ret = 0;
 	struct dma_buf *dmabuf;
 
+	mutex_lock(&file_priv->prime.lock);
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj)
-		return -ENOENT;
+	if (!obj)  {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
 
-	mutex_lock(&file_priv->prime.lock);
+	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+	if (dmabuf) {
+		get_dma_buf(dmabuf);
+		goto out_have_handle;
+	}
+
+	mutex_lock(&dev->object_name_lock);
 	/* re-export the original imported object */
 	if (obj->import_attach) {
 		dmabuf = obj->import_attach->dmabuf;
+		get_dma_buf(dmabuf);
 		goto out_have_obj;
 	}
 
-	if (obj->export_dma_buf) {
-		dmabuf = obj->export_dma_buf;
+	if (obj->dma_buf) {
+		get_dma_buf(obj->dma_buf);
+		dmabuf = obj->dma_buf;
 		goto out_have_obj;
 	}
 
-	buf = dev->driver->gem_prime_export(dev, obj, flags);
-	if (IS_ERR(buf)) {
+	dmabuf = export_and_register_object(dev, obj, flags);
+	if (IS_ERR(dmabuf)) {
 		/* normally the created dma-buf takes ownership of the ref,
 		 * but if that fails then drop the ref
 		 */
-		ret = PTR_ERR(buf);
+		ret = PTR_ERR(dmabuf);
+		mutex_unlock(&dev->object_name_lock);
 		goto out;
 	}
-	obj->export_dma_buf = buf;
 
-	/* if we've exported this buffer the cheat and add it to the import list
-	 * so we get the correct handle back
+out_have_obj:
+	/*
+	 * If we've exported this buffer then cheat and add it to the import list
+	 * so we get the correct handle back. We must do this under the
+	 * protection of dev->object_name_lock to ensure that a racing gem close
+	 * ioctl doesn't fail to remove this buffer handle from the cache.
 	 */
 	ret = drm_prime_add_buf_handle(&file_priv->prime,
-				       obj->export_dma_buf, handle);
+				       dmabuf, handle);
+	mutex_unlock(&dev->object_name_lock);
 	if (ret)
 		goto fail_put_dmabuf;
 
-	ret = dma_buf_fd(buf, flags);
-	if (ret < 0)
-		goto fail_rm_handle;
-
-	*prime_fd = ret;
-	mutex_unlock(&file_priv->prime.lock);
-	return 0;
-
-out_have_obj:
-	get_dma_buf(dmabuf);
+out_have_handle:
 	ret = dma_buf_fd(dmabuf, flags);
+	/*
+	 * We must _not_ remove the buffer from the handle cache since the newly
+	 * created dma buf is already linked in the global obj->dma_buf pointer,
+	 * and that is invariant as long as a userspace gem handle exists.
+	 * Closing the handle will clean out the cache anyway, so we don't leak.
+	 */
 	if (ret < 0) {
-		dma_buf_put(dmabuf);
+		goto fail_put_dmabuf;
 	} else {
 		*prime_fd = ret;
 		ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
 
 	goto out;
 
-fail_rm_handle:
-	drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
 fail_put_dmabuf:
-	/* clear NOT to be checked when releasing dma_buf */
-	obj->export_dma_buf = NULL;
-	dma_buf_put(buf);
+	dma_buf_put(dmabuf);
 out:
 	drm_gem_object_unreference_unlocked(obj);
+out_unlock:
 	mutex_unlock(&file_priv->prime.lock);
+
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
 			dma_buf, handle);
-	if (!ret) {
-		ret = 0;
+	if (ret == 0)
 		goto out_put;
-	}
 
 	/* never seen this one, need to import */
+	mutex_lock(&dev->object_name_lock);
 	obj = dev->driver->gem_prime_import(dev, dma_buf);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
-		goto out_put;
+		goto out_unlock;
 	}
 
-	ret = drm_gem_handle_create(file_priv, obj, handle);
+	if (obj->dma_buf) {
+		WARN_ON(obj->dma_buf != dma_buf);
+	} else {
+		obj->dma_buf = dma_buf;
+		get_dma_buf(dma_buf);
+	}
+
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		goto out_put;
@@ -478,7 +550,9 @@ fail:
 	/* hmm, if driver attached, we are relying on the free-object path
 	 * to detach.. which seems ok..
 	 */
-	drm_gem_object_handle_unreference_unlocked(obj);
+	drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+	mutex_unlock(&dev->object_name_lock);
 out_put:
 	dma_buf_put(dma_buf);
 	mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 	WARN_ON(!list_empty(&prime_fpriv->head));
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
-
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
-{
-	struct drm_prime_member *member;
-
-	list_for_each_entry(member, &prime_fpriv->head, entry) {
-		if (member->dma_buf == dma_buf) {
-			*handle = member->handle;
-			return 0;
-		}
-	}
-	return -ENOENT;
-}
-EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
-{
-	mutex_lock(&prime_fpriv->lock);
-	drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
-	mutex_unlock(&prime_fpriv->lock);
-}
-EXPORT_SYMBOL(drm_prime_remove_buf_handle);

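The export path above now caches the dma-buf per (open-file, handle) pair under file_priv->prime.lock, so re-exporting the same handle hands back the same dma-buf instead of creating a new one. Userspace is unaffected; for reference, a minimal export helper via libdrm, where the GEM handle is assumed to come from a driver-specific allocation ioctl:

#include <stdint.h>
#include <xf86drm.h>

/* Export a GEM handle as a dma-buf file descriptor. Wraps
 * DRM_IOCTL_PRIME_HANDLE_TO_FD, which lands in
 * drm_gem_prime_handle_to_fd() above. */
static int export_gem_handle(int drm_fd, uint32_t handle)
{
	int prime_fd = -1;

	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd))
		return -1;

	return prime_fd;
}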
+ 0 - 209
drivers/gpu/drm/drm_proc.c

@@ -1,209 +0,0 @@
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- *    the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <drm/drmP.h>
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static const struct drm_info_list drm_proc_list[] = {
-	{"name", drm_name_info, 0},
-	{"vm", drm_vm_info, 0},
-	{"clients", drm_clients_info, 0},
-	{"bufs", drm_bufs_info, 0},
-	{"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
-	{"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
-	struct drm_info_node* node = PDE_DATA(inode);
-
-	return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_proc_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * gdm_proc_lists in the given root directory.
- */
-static int drm_proc_create_files(const struct drm_info_list *files, int count,
-			  struct proc_dir_entry *root, struct drm_minor *minor)
-{
-	struct drm_device *dev = minor->dev;
-	struct proc_dir_entry *ent;
-	struct drm_info_node *tmp;
-	int i;
-
-	for (i = 0; i < count; i++) {
-		u32 features = files[i].driver_features;
-
-		if (features != 0 &&
-		    (dev->driver->driver_features & features) != features)
-			continue;
-
-		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-		if (!tmp)
-			return -1;
-
-		tmp->minor = minor;
-		tmp->info_ent = &files[i];
-		list_add(&tmp->list, &minor->proc_nodes.list);
-
-		ent = proc_create_data(files[i].name, S_IRUGO, root,
-				       &drm_proc_fops, tmp);
-		if (!ent) {
-			DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
-				  minor->index, files[i].name);
-			list_del(&tmp->list);
-			kfree(tmp);
-			return -1;
-		}
-	}
-	return 0;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param dev DRM device
- * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
-{
-	char name[12];
-	int ret;
-
-	INIT_LIST_HEAD(&minor->proc_nodes.list);
-	sprintf(name, "%u", minor->index);
-	minor->proc_root = proc_mkdir(name, root);
-	if (!minor->proc_root) {
-		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
-		return -1;
-	}
-
-	ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
-				    minor->proc_root, minor);
-	if (ret) {
-		remove_proc_subtree(name, root);
-		minor->proc_root = NULL;
-		DRM_ERROR("Failed to create core drm proc files\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int drm_proc_remove_files(const struct drm_info_list *files, int count,
-			  struct drm_minor *minor)
-{
-	struct list_head *pos, *q;
-	struct drm_info_node *tmp;
-	int i;
-
-	for (i = 0; i < count; i++) {
-		list_for_each_safe(pos, q, &minor->proc_nodes.list) {
-			tmp = list_entry(pos, struct drm_info_node, list);
-			if (tmp->info_ent == &files[i]) {
-				remove_proc_entry(files[i].name,
-						  minor->proc_root);
-				list_del(pos);
-				kfree(tmp);
-			}
-		}
-	}
-	return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor number.
- * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
-	char name[64];
-
-	if (!root || !minor->proc_root)
-		return 0;
-
-	drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
-	sprintf(name, "%d", minor->index);
-	remove_proc_subtree(name, root);
-	return 0;
-}
-

+ 18 - 11
drivers/gpu/drm/drm_scatter.c

@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
 #endif
 }
 
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+static void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
 	int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
 	kfree(entry);
 }
 
+void drm_legacy_sg_cleanup(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
+}
 #ifdef _LP64
 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
 #else
 # define ScatterHandle(x) (unsigned int)(x)
 #endif
 
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
 {
+	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
 	unsigned long pages, i, j;
 
 	DRM_DEBUG("\n");
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_SG))
 		return -EINVAL;
 
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	return -ENOMEM;
 }
 
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_scatter_gather *request = data;
-
-	return drm_sg_alloc(dev, request);
-
-}
-
 int drm_sg_free(struct drm_device *dev, void *data,
 		struct drm_file *file_priv)
 {
 	struct drm_scatter_gather *request = data;
 	struct drm_sg_mem *entry;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (!drm_core_check_feature(dev, DRIVER_SG))
 		return -EINVAL;
 

+ 28 - 45
drivers/gpu/drm/drm_stub.c

@@ -40,6 +40,9 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
+unsigned int drm_rnodes = 0;	/* 1 to enable experimental render nodes API */
+EXPORT_SYMBOL(drm_rnodes);
+
 unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
 EXPORT_SYMBOL(drm_vblank_offdelay);
 
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
+module_param_named(rnodes, drm_rnodes, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 struct idr drm_minors_idr;
 
 struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
 struct dentry *drm_debugfs_root;
 
 int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
 	int base = 0, limit = 63;
 
 	if (type == DRM_MINOR_CONTROL) {
-                base += 64;
-                limit = base + 127;
-        } else if (type == DRM_MINOR_RENDER) {
-                base += 128;
-                limit = base + 255;
-        }
+		base += 64;
+		limit = base + 63;
+	} else if (type == DRM_MINOR_RENDER) {
+		base += 128;
+		limit = base + 63;
+	}
 
 	mutex_lock(&dev->struct_mutex);
 	ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
 			goto error_out_unreg;
 	}
 
-
-
-	retcode = drm_ctxbitmap_init(dev);
-	if (retcode) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto error_out_unreg;
-	}
+	drm_legacy_ctxbitmap_init(dev);
 
 	if (driver->driver_features & DRIVER_GEM) {
 		retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
  * \param sec-minor structure to hold the assigned minor
  * \return negative number on failure.
  *
- * Search an empty entry and initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routines assigns
- * minor numbers to secondary heads of multi-headed cards
+ * Search for an empty entry and initialize it to the given parameters. This
+ * routine assigns minor numbers to secondary heads of multi-headed cards
  */
 int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 {
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 
 	idr_replace(&drm_minors_idr, new_minor, minor_id);
 
-	if (type == DRM_MINOR_LEGACY) {
-		ret = drm_proc_init(new_minor, drm_proc_root);
-		if (ret) {
-			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
-			goto err_mem;
-		}
-	} else
-		new_minor->proc_root = NULL;
-
 #if defined(CONFIG_DEBUG_FS)
 	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
 	if (ret) {
 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
-		goto err_g2;
+		goto err_mem;
 	}
 #endif
 
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 	if (ret) {
 		printk(KERN_ERR
 		       "DRM: Error sysfs_device_add.\n");
-		goto err_g2;
+		goto err_debugfs;
 	}
 	*minor = new_minor;
 
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 	return 0;
 
 
-err_g2:
-	if (new_minor->type == DRM_MINOR_LEGACY)
-		drm_proc_cleanup(new_minor, drm_proc_root);
+err_debugfs:
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_cleanup(new_minor);
 err_mem:
+#endif
 	kfree(new_minor);
 err_idr:
 	idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
  *
  * \param sec_minor - structure to be released
  * \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
  */
 int drm_put_minor(struct drm_minor **minor_p)
 {
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
 
 	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
-	if (minor->type == DRM_MINOR_LEGACY)
-		drm_proc_cleanup(minor, drm_proc_root);
 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_cleanup(minor);
 #endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
 
 	drm_lastclose(dev);
 
-	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
-		arch_phys_wc_del(dev->agp->agp_mtrr);
-
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 
-	if (drm_core_has_AGP(dev) && dev->agp) {
-		kfree(dev->agp);
-		dev->agp = NULL;
-	}
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
 
 	drm_vblank_cleanup(dev);
 
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
 		drm_rmmap(dev, r_list->map);
 	drm_ht_remove(&dev->map_hash);
 
-	drm_ctxbitmap_cleanup(dev);
+	drm_legacy_ctxbitmap_cleanup(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
 
+	if (dev->render)
+		drm_put_minor(&dev->render);
+
 	if (driver->driver_features & DRIVER_GEM)
 		drm_gem_destroy(dev);
 
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
 	/* for a USB device */
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_unplug_minor(dev->control);
+	if (dev->render)
+		drm_unplug_minor(dev->render);
 	drm_unplug_minor(dev->primary);
 
 	mutex_lock(&drm_global_mutex);

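Render minors are allocated from base 128, so with the experimental drm.rnodes=1 module parameter set, the first render node is expected to show up as /dev/dri/renderD128 (the exact index depends on probe order and device naming). A small, hedged helper that prefers the render node and falls back to the legacy card node:

#include <fcntl.h>
#include <unistd.h>

/* Paths assume the first DRM device in the system. */
static int open_drm_node(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);

	if (fd < 0)	/* no render node (rnodes disabled or older kernel) */
		fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	return fd;
}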
+ 9 - 0
drivers/gpu/drm/drm_usb.c

@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
 	if (ret)
 		goto err_g1;
 
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_g11;
+	}
+
 	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
 	if (ret)
 		goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
 err_g3:
 	drm_put_minor(&dev->primary);
 err_g2:
+	if (dev->render)
+		drm_put_minor(&dev->render);
+err_g11:
 	drm_put_minor(&dev->control);
 err_g1:
 	kfree(dev);

+ 1 - 2
drivers/gpu/drm/drm_vm.c

@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			switch (map->type) {
 			case _DRM_REGISTERS:
 			case _DRM_FRAME_BUFFER:
-				if (drm_core_has_MTRR(dev))
-					arch_phys_wc_del(map->mtrr);
+				arch_phys_wc_del(map->mtrr);
 				iounmap(map->handle);
 				break;
 			case _DRM_SHM:

+ 436 - 0
drivers/gpu/drm/drm_vma_manager.c

@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care not to overlap regions, to size them appropriately and not to
+ * confuse mm-core with inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
+ */
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction are left
+ * to the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+				 unsigned long page_offset, unsigned long size)
+{
+	rwlock_init(&mgr->vm_lock);
+	mgr->vm_addr_space_rb = RB_ROOT;
+	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+	/* take the lock to protect against buggy drivers */
+	write_lock(&mgr->vm_lock);
+	drm_mm_takedown(&mgr->vm_addr_space_mm);
+	write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+						  unsigned long start,
+						  unsigned long pages)
+{
+	struct drm_vma_offset_node *node;
+
+	read_lock(&mgr->vm_lock);
+	node = drm_vma_offset_lookup_locked(mgr, start, pages);
+	read_unlock(&mgr->vm_lock);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+							 unsigned long start,
+							 unsigned long pages)
+{
+	struct drm_vma_offset_node *node, *best;
+	struct rb_node *iter;
+	unsigned long offset;
+
+	iter = mgr->vm_addr_space_rb.rb_node;
+	best = NULL;
+
+	while (likely(iter)) {
+		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+		offset = node->vm_node.start;
+		if (start >= offset) {
+			iter = iter->rb_right;
+			best = node;
+			if (start == offset)
+				break;
+		} else {
+			iter = iter->rb_left;
+		}
+	}
+
+	/* verify that the node spans the requested area */
+	if (best) {
+		offset = best->vm_node.start + best->vm_node.size;
+		if (offset < start + pages)
+			best = NULL;
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+				   struct drm_vma_offset_node *node)
+{
+	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+	struct rb_node *parent = NULL;
+	struct drm_vma_offset_node *iter_node;
+
+	while (likely(*iter)) {
+		parent = *iter;
+		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+		if (node->vm_node.start < iter_node->vm_node.start)
+			iter = &(*iter)->rb_left;
+		else if (node->vm_node.start > iter_node->vm_node.start)
+			iter = &(*iter)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&node->vm_rb, parent, iter);
+	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+		       struct drm_vma_offset_node *node, unsigned long pages)
+{
+	int ret;
+
+	write_lock(&mgr->vm_lock);
+
+	if (drm_mm_node_allocated(&node->vm_node)) {
+		ret = 0;
+		goto out_unlock;
+	}
+
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	if (ret)
+		goto out_unlock;
+
+	_drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+	write_unlock(&mgr->vm_lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+			   struct drm_vma_offset_node *node)
+{
+	write_lock(&mgr->vm_lock);
+
+	if (drm_mm_node_allocated(&node->vm_node)) {
+		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+		drm_mm_remove_node(&node->vm_node);
+		memset(&node->vm_node, 0, sizeof(node->vm_node));
+	}
+
+	write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+	struct rb_node **iter;
+	struct rb_node *parent = NULL;
+	struct drm_vma_offset_file *new, *entry;
+	int ret = 0;
+
+	/* Preallocate entry to avoid atomic allocations below. It is quite
+	 * unlikely that an open-file is added twice to a single node so we
+	 * don't optimize for this case. OOM is checked below only if the entry
+	 * is actually used. */
+	new = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+	write_lock(&node->vm_lock);
+
+	iter = &node->vm_files.rb_node;
+
+	while (likely(*iter)) {
+		parent = *iter;
+		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+		if (filp == entry->vm_filp) {
+			entry->vm_count++;
+			goto unlock;
+		} else if (filp > entry->vm_filp) {
+			iter = &(*iter)->rb_right;
+		} else {
+			iter = &(*iter)->rb_left;
+		}
+	}
+
+	if (!new) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	new->vm_filp = filp;
+	new->vm_count = 1;
+	rb_link_node(&new->vm_rb, parent, iter);
+	rb_insert_color(&new->vm_rb, &node->vm_files);
+	new = NULL;
+
+unlock:
+	write_unlock(&node->vm_lock);
+	kfree(new);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+	struct drm_vma_offset_file *entry;
+	struct rb_node *iter;
+
+	write_lock(&node->vm_lock);
+
+	iter = node->vm_files.rb_node;
+	while (likely(iter)) {
+		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+		if (filp == entry->vm_filp) {
+			if (!--entry->vm_count) {
+				rb_erase(&entry->vm_rb, &node->vm_files);
+				kfree(entry);
+			}
+			break;
+		} else if (filp > entry->vm_filp) {
+			iter = iter->rb_right;
+		} else {
+			iter = iter->rb_left;
+		}
+	}
+
+	write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search the list in @node whether @filp is currently on the list of allowed
+ * open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+			     struct file *filp)
+{
+	struct drm_vma_offset_file *entry;
+	struct rb_node *iter;
+
+	read_lock(&node->vm_lock);
+
+	iter = node->vm_files.rb_node;
+	while (likely(iter)) {
+		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+		if (filp == entry->vm_filp)
+			break;
+		else if (filp > entry->vm_filp)
+			iter = iter->rb_right;
+		else
+			iter = iter->rb_left;
+	}
+
+	read_unlock(&node->vm_lock);
+
+	return iter;
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);

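Put together, a driver wires the manager into its mmap-offset path roughly as below. This is a sketch based only on the functions added in this file; the embedded drm_vma_offset_node, its reset helper and drm_vma_node_offset_addr() live in the new header, and my_gem_object plus the manager setup are illustrative:

#include <drm/drm_vma_manager.h>

struct my_gem_object {
	struct drm_vma_offset_node vma_node;	/* embedded offset node,
						 * reset via the header helper
						 * before first use */
	unsigned long num_pages;
	/* ... */
};

/* One manager per device, set up at load time with
 * drm_vma_offset_manager_init() over the fake mmap offset range. */
static struct drm_vma_offset_manager my_vma_mgr;

static int my_obj_create_mmap_offset(struct my_gem_object *obj,
				     struct file *filp)
{
	int ret;

	/* Size is given in pages; calling this twice is a no-op. */
	ret = drm_vma_offset_add(&my_vma_mgr, &obj->vma_node, obj->num_pages);
	if (ret)
		return ret;

	/* Per-filp mmap permission: without this, mmap() is refused. */
	return drm_vma_node_allow(&obj->vma_node, filp);
}

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct my_gem_object *obj;

	node = drm_vma_offset_lookup(&my_vma_mgr, vma->vm_pgoff,
				     vma_pages(vma));
	if (!node)
		return -EINVAL;
	if (!drm_vma_node_is_allowed(node, filp))
		return -EACCES;

	obj = container_of(node, struct my_gem_object, vma_node);
	/* ... install vma->vm_ops etc. for obj ... */
	return 0;
}

static void my_obj_release(struct my_gem_object *obj, struct file *filp)
{
	drm_vma_node_revoke(&obj->vma_node, filp);
	drm_vma_offset_remove(&my_vma_mgr, &obj->vma_node);
}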
+ 3 - 3
drivers/gpu/drm/exynos/Kconfig

@@ -1,11 +1,12 @@
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
-	depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.
 	  If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF
 
 config DRM_EXYNOS_FIMD
 	bool "Exynos DRM FIMD"
-	depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+	depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
 	select FB_MODE_HELPERS
-	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you want to use Exynos FIMD for DRM.
 

+ 2 - 11
drivers/gpu/drm/exynos/exynos_ddc.c

@@ -15,7 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-
+#include <linux/of.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
@@ -41,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client)
 	return 0;
 }
 
-static struct i2c_device_id ddc_idtable[] = {
-	{"s5p_ddc", 0},
-	{"exynos5-hdmiddc", 0},
-	{ },
-};
-
-#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiddc",
@@ -57,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = {
 		/* end node */
 	}
 };
-#endif
 
 struct i2c_driver ddc_driver = {
 	.driver = {
 		.name = "exynos-hdmiddc",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(hdmiddc_match_types),
+		.of_match_table = hdmiddc_match_types,
 	},
-	.id_table	= ddc_idtable,
 	.probe		= s5p_ddc_probe,
 	.remove		= s5p_ddc_remove,
 	.command		= NULL,

+ 1 - 8
drivers/gpu/drm/exynos/exynos_drm_buf.c

@@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+	if (!buffer)
 		return NULL;
-	}
 
 	buffer->size = size;
 	return buffer;
@@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
 				struct exynos_drm_gem_buf *buffer)
 {
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
-		return;
-	}
-
 	kfree(buffer);
 	buffer = NULL;
 }

+ 5 - 33
drivers/gpu/drm/exynos/exynos_drm_connector.c

@@ -17,6 +17,7 @@
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
 
 #define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\
 				drm_connector)
@@ -28,35 +29,6 @@ struct exynos_drm_connector {
 	uint32_t		dpms;
 };
 
-/* convert exynos_video_timings to drm_display_mode */
-static inline void
-convert_to_display_mode(struct drm_display_mode *mode,
-			struct exynos_drm_panel_info *panel)
-{
-	struct fb_videomode *timing = &panel->timing;
-
-	mode->clock = timing->pixclock / 1000;
-	mode->vrefresh = timing->refresh;
-
-	mode->hdisplay = timing->xres;
-	mode->hsync_start = mode->hdisplay + timing->right_margin;
-	mode->hsync_end = mode->hsync_start + timing->hsync_len;
-	mode->htotal = mode->hsync_end + timing->left_margin;
-
-	mode->vdisplay = timing->yres;
-	mode->vsync_start = mode->vdisplay + timing->lower_margin;
-	mode->vsync_end = mode->vsync_start + timing->vsync_len;
-	mode->vtotal = mode->vsync_end + timing->upper_margin;
-	mode->width_mm = panel->width_mm;
-	mode->height_mm = panel->height_mm;
-
-	if (timing->vmode & FB_VMODE_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
-	if (timing->vmode & FB_VMODE_DOUBLE)
-		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
-}
-
 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
 	struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 			return 0;
 		}
 
-		convert_to_display_mode(mode, panel);
+		drm_display_mode_from_videomode(&panel->vm, mode);
+		mode->width_mm = panel->width_mm;
+		mode->height_mm = panel->height_mm;
 		connector->display_info.width_mm = mode->width_mm;
 		connector->display_info.height_mm = mode->height_mm;
 
@@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 	int err;
 
 	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
-	if (!exynos_connector) {
-		DRM_ERROR("failed to allocate connector\n");
+	if (!exynos_connector)
 		return NULL;
-	}
 
 	connector = &exynos_connector->drm_connector;
 

+ 5 - 5
drivers/gpu/drm/exynos/exynos_drm_crtc.c

@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
@@ -184,8 +185,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 };
 
 static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      struct drm_pending_vblank_event *event)
+				     struct drm_framebuffer *fb,
+				     struct drm_pending_vblank_event *event,
+				     uint32_t page_flip_flags)
 {
 	struct drm_device *dev = crtc->dev;
 	struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -323,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 	struct drm_crtc *crtc;
 
 	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
-	if (!exynos_crtc) {
-		DRM_ERROR("failed to allocate exynos crtc\n");
+	if (!exynos_crtc)
 		return -ENOMEM;
-	}
 
 	exynos_crtc->pipe = nr;
 	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;

+ 10 - 27
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c

@@ -11,6 +11,7 @@
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
+#include "exynos_drm_dmabuf.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
@@ -22,6 +23,11 @@ struct exynos_drm_dmabuf_attachment {
 	bool is_mapped;
 };
 
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+	return to_exynos_gem_obj(buf->priv);
+}
+
 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
 					struct device *dev,
 					struct dma_buf_attachment *attach)
@@ -63,7 +69,7 @@ static struct sg_table *
 					enum dma_data_direction dir)
 {
 	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
 	struct scatterlist *rd, *wr;
@@ -127,27 +133,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 	/* Nothing to do. */
 }
 
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
-	/*
-	 * exynos_dmabuf_release() call means that file object's
-	 * f_count is 0 and it calls drm_gem_object_handle_unreference()
-	 * to drop the references that these values had been increased
-	 * at drm_prime_handle_to_fd()
-	 */
-	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
-		exynos_gem_obj->base.export_dma_buf = NULL;
-
-		/*
-		 * drop this gem object refcount to release allocated buffer
-		 * and resources.
-		 */
-		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
-	}
-}
-
 static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
 						unsigned long page_num)
 {
@@ -193,7 +178,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
 	.kunmap			= exynos_gem_dmabuf_kunmap,
 	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
 	.mmap			= exynos_gem_dmabuf_mmap,
-	.release		= exynos_dmabuf_release,
+	.release		= drm_gem_dmabuf_release,
 };
 
 struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +186,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+	return dma_buf_export(obj, &exynos_dmabuf_ops,
 				exynos_gem_obj->base.size, flags);
 }
 
@@ -219,8 +204,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	if (dma_buf->ops == &exynos_dmabuf_ops) {
 		struct drm_gem_object *obj;
 
-		exynos_gem_obj = dma_buf->priv;
-		obj = &exynos_gem_obj->base;
+		obj = dma_buf->priv;
 
 		/* is it from our device? */
 		if (obj->dev == drm_dev) {
@@ -247,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
-		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
 		ret = -ENOMEM;
 		goto err_unmap_attach;
 	}

+ 4 - 6
drivers/gpu/drm/exynos/exynos_drm_drv.c

@@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	int nr;
 
 	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("failed to allocate private\n");
+	if (!private)
 		return -ENOMEM;
-	}
 
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;
@@ -213,7 +211,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 	.close = drm_gem_vm_close,
 };
 
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +269,13 @@ static struct drm_driver exynos_drm_driver = {
 	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
 	.dumb_create		= exynos_drm_gem_dumb_create,
 	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
-	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
+	.dumb_destroy		= drm_gem_dumb_destroy,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 	.gem_prime_export	= exynos_dmabuf_prime_export,
 	.gem_prime_import	= exynos_dmabuf_prime_import,
 	.ioctls			= exynos_ioctls,
+	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
 	.fops			= &exynos_drm_driver_fops,
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
@@ -288,7 +287,6 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
 	return drm_platform_init(&exynos_drm_driver, pdev);
 }

+ 1 - 3
drivers/gpu/drm/exynos/exynos_drm_encoder.c

@@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev,
 		return NULL;
 
 	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
-	if (!exynos_encoder) {
-		DRM_ERROR("failed to allocate encoder\n");
+	if (!exynos_encoder)
 		return NULL;
-	}
 
 	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
 	exynos_encoder->manager = manager;

+ 2 - 6
drivers/gpu/drm/exynos/exynos_drm_fb.c

@@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 	}
 
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb) {
-		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
 	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 	int i, ret;
 
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb) {
-		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
 	if (!obj) {

+ 15 - 5
drivers/gpu/drm/exynos/exynos_drm_fbdev.c

@@ -16,9 +16,11 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
@@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	/* 0 means to allocate physically continuous memory */
-	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+	/*
+	 * If physically contiguous memory allocation fails and IOMMU is
+	 * supported, fall back to allocating the buffer from a
+	 * non-contiguous memory area.
+	 */
+	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
+							size);
+	}
+
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
 		goto err_release_framebuffer;
@@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
 		return 0;
 
 	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
-	if (!fbdev) {
-		DRM_ERROR("failed to allocate drm fbdev.\n");
+	if (!fbdev)
 		return -ENOMEM;
-	}
 
 	private->fb_helper = helper = &fbdev->drm_fb_helper;
 	helper->funcs = &exynos_drm_fb_helper_funcs;

+ 3 - 3
drivers/gpu/drm/exynos/exynos_drm_fimc.c

@@ -17,10 +17,12 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-fimc.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_fimc.h"
 
@@ -1343,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->writeback = 1;

+ 120 - 143
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -16,10 +16,12 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/of_display_timing.h>
+#include <video/of_videomode.h>
 #include <video/samsung_fimd.h>
 #include <drm/exynos_drm.h>
 
@@ -35,6 +37,8 @@
  * CPU Interface.
  */
 
+#define FIMD_DEFAULT_FRAMERATE 60
+
 /* position control register for hardware window 0, 2 ~ 4.*/
 #define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
 #define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
@@ -65,11 +69,13 @@ struct fimd_driver_data {
 
 	unsigned int has_shadowcon:1;
 	unsigned int has_clksel:1;
+	unsigned int has_limited_fmt:1;
 };
 
 static struct fimd_driver_data s3c64xx_fimd_driver_data = {
 	.timing_base = 0x0,
 	.has_clksel = 1,
+	.has_limited_fmt = 1,
 };
 
 static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -90,6 +96,7 @@ struct fimd_win_data {
 	unsigned int		fb_width;
 	unsigned int		fb_height;
 	unsigned int		bpp;
+	unsigned int		pixel_format;
 	dma_addr_t		dma_addr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
@@ -115,11 +122,10 @@ struct fimd_context {
 	wait_queue_head_t		wait_vsync_queue;
 	atomic_t			wait_vsync_event;
 
-	struct exynos_drm_panel_info *panel;
+	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id fimd_driver_dt_match[] = {
 	{ .compatible = "samsung,s3c6400-fimd",
 	  .data = &s3c64xx_fimd_driver_data },
@@ -129,21 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
 	  .data = &exynos5_fimd_driver_data },
 	{},
 };
-#endif
 
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
 	const struct of_device_id *of_id =
 			of_match_device(fimd_driver_dt_match, &pdev->dev);
 
-	if (of_id)
-		return (struct fimd_driver_data *)of_id->data;
-#endif
-
-	return (struct fimd_driver_data *)
-		platform_get_device_id(pdev)->driver_data;
+	return (struct fimd_driver_data *)of_id->data;
 }
 
 static bool fimd_display_is_connected(struct device *dev)
@@ -157,7 +156,7 @@ static void *fimd_get_panel(struct device *dev)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
 
-	return ctx->panel;
+	return &ctx->panel;
 }
 
 static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -237,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev)
 static void fimd_commit(struct device *dev)
 {
 	struct fimd_context *ctx = get_fimd_context(dev);
-	struct exynos_drm_panel_info *panel = ctx->panel;
-	struct fb_videomode *timing = &panel->timing;
+	struct exynos_drm_panel_info *panel = &ctx->panel;
+	struct videomode *vm = &panel->vm;
 	struct fimd_driver_data *driver_data;
 	u32 val;
 
@@ -250,22 +249,22 @@ static void fimd_commit(struct device *dev)
 	writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
 
 	/* setup vertical timing values. */
-	val = VIDTCON0_VBPD(timing->upper_margin - 1) |
-	       VIDTCON0_VFPD(timing->lower_margin - 1) |
-	       VIDTCON0_VSPW(timing->vsync_len - 1);
+	val = VIDTCON0_VBPD(vm->vback_porch - 1) |
+	       VIDTCON0_VFPD(vm->vfront_porch - 1) |
+	       VIDTCON0_VSPW(vm->vsync_len - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
 
 	/* setup horizontal timing values.  */
-	val = VIDTCON1_HBPD(timing->left_margin - 1) |
-	       VIDTCON1_HFPD(timing->right_margin - 1) |
-	       VIDTCON1_HSPW(timing->hsync_len - 1);
+	val = VIDTCON1_HBPD(vm->hback_porch - 1) |
+	       VIDTCON1_HFPD(vm->hfront_porch - 1) |
+	       VIDTCON1_HSPW(vm->hsync_len - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
 
 	/* setup horizontal and vertical display size. */
-	val = VIDTCON2_LINEVAL(timing->yres - 1) |
-	       VIDTCON2_HOZVAL(timing->xres - 1) |
-	       VIDTCON2_LINEVAL_E(timing->yres - 1) |
-	       VIDTCON2_HOZVAL_E(timing->xres - 1);
+	val = VIDTCON2_LINEVAL(vm->vactive - 1) |
+	       VIDTCON2_HOZVAL(vm->hactive - 1) |
+	       VIDTCON2_LINEVAL_E(vm->vactive - 1) |
+	       VIDTCON2_HOZVAL_E(vm->hactive - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
 	/* setup clock source, clock divider, enable dma. */
@@ -396,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev,
 	win_data->fb_height = overlay->fb_height;
 	win_data->dma_addr = overlay->dma_addr[0] + offset;
 	win_data->bpp = overlay->bpp;
+	win_data->pixel_format = overlay->pixel_format;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
 	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -417,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
 
 	val = WINCONx_ENWIN;
 
-	switch (win_data->bpp) {
-	case 1:
-		val |= WINCON0_BPPMODE_1BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_4WORD;
-		break;
-	case 2:
-		val |= WINCON0_BPPMODE_2BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_8WORD;
-		break;
-	case 4:
-		val |= WINCON0_BPPMODE_4BPP;
-		val |= WINCONx_BITSWP;
-		val |= WINCONx_BURSTLEN_8WORD;
-		break;
-	case 8:
+	/*
+	 * On s3c64xx, window 0 does not support an alpha channel, so if
+	 * the requested format is ARGB8888, downgrade it to XRGB8888.
+	 */
+	if (ctx->driver_data->has_limited_fmt && !win) {
+		if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
+			win_data->pixel_format = DRM_FORMAT_XRGB8888;
+	}
+
+	switch (win_data->pixel_format) {
+	case DRM_FORMAT_C8:
 		val |= WINCON0_BPPMODE_8BPP_PALETTE;
 		val |= WINCONx_BURSTLEN_8WORD;
 		val |= WINCONx_BYTSWP;
 		break;
-	case 16:
+	case DRM_FORMAT_XRGB1555:
+		val |= WINCON0_BPPMODE_16BPP_1555;
+		val |= WINCONx_HAWSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case DRM_FORMAT_RGB565:
 		val |= WINCON0_BPPMODE_16BPP_565;
 		val |= WINCONx_HAWSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
-	case 24:
+	case DRM_FORMAT_XRGB8888:
 		val |= WINCON0_BPPMODE_24BPP_888;
 		val |= WINCONx_WSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
 		break;
-	case 32:
-		val |= WINCON1_BPPMODE_28BPP_A4888
+	case DRM_FORMAT_ARGB8888:
+		val |= WINCON1_BPPMODE_25BPP_A1888
 			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
 		val |= WINCONx_WSWP;
 		val |= WINCONx_BURSTLEN_16WORD;
@@ -746,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 		drm_iommu_detach_device(drm_dev, dev);
 }
 
-static int fimd_calc_clkdiv(struct fimd_context *ctx,
-			    struct fb_videomode *timing)
+static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
 {
-	unsigned long clk = clk_get_rate(ctx->lcd_clk);
-	u32 retrace;
-	u32 clkdiv;
-	u32 best_framerate = 0;
-	u32 framerate;
-
-	retrace = timing->left_margin + timing->hsync_len +
-				timing->right_margin + timing->xres;
-	retrace *= timing->upper_margin + timing->vsync_len +
-				timing->lower_margin + timing->yres;
-
-	/* default framerate is 60Hz */
-	if (!timing->refresh)
-		timing->refresh = 60;
-
-	clk /= retrace;
-
-	for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
-		int tmp;
-
-		/* get best framerate */
-		framerate = clk / clkdiv;
-		tmp = timing->refresh - framerate;
-		if (tmp < 0) {
-			best_framerate = framerate;
-			continue;
-		} else {
-			if (!best_framerate)
-				best_framerate = framerate;
-			else if (tmp < (best_framerate - framerate))
-				best_framerate = framerate;
-			break;
+	struct videomode *vm = &ctx->panel.vm;
+	unsigned long clk;
+
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
+	if (IS_ERR(ctx->bus_clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(ctx->bus_clk);
+	}
+
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+	if (IS_ERR(ctx->lcd_clk)) {
+		dev_err(dev, "failed to get lcd clock\n");
+		return PTR_ERR(ctx->lcd_clk);
+	}
+
+	clk = clk_get_rate(ctx->lcd_clk);
+	if (clk == 0) {
+		dev_err(dev, "error getting sclk_fimd clock rate\n");
+		return -EINVAL;
+	}
+
+	if (vm->pixelclock == 0) {
+		unsigned long c;
+		c = vm->hactive + vm->hback_porch + vm->hfront_porch +
+		    vm->hsync_len;
+		c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
+		     vm->vsync_len;
+		vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
+		if (vm->pixelclock == 0) {
+			dev_err(dev, "incorrect display timings\n");
+			return -EINVAL;
 		}
+		dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
+			 vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
 	}
+	ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
+	if (ctx->clkdiv > 256) {
+		dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
+			 ctx->clkdiv);
+		ctx->clkdiv = 256;
+	}
+	vm->pixelclock = clk / ctx->clkdiv;
+	DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
+		      ctx->clkdiv);
 
-	return clkdiv;
+	return 0;
 }
 
 static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -876,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
 	return 0;
 }
 
+static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
+{
+	struct videomode *vm;
+	int ret;
+
+	vm = &ctx->panel.vm;
+	ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
+	if (ret) {
+		DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+		return ret;
+	}
+
+	if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_VSYNC;
+	if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_HSYNC;
+	if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+		ctx->vidcon1 |= VIDCON1_INV_VDEN;
+	if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+		ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+	return 0;
+}
+
 static int fimd_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct fimd_context *ctx;
 	struct exynos_drm_subdrv *subdrv;
-	struct exynos_drm_fimd_pdata *pdata;
-	struct exynos_drm_panel_info *panel;
 	struct resource *res;
 	int win;
 	int ret = -EINVAL;
 
-	if (dev->of_node) {
-		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			DRM_ERROR("memory allocation for pdata failed\n");
-			return -ENOMEM;
-		}
-
-		ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
-					OF_USE_NATIVE_MODE);
-		if (ret) {
-			DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
-			return ret;
-		}
-	} else {
-		pdata = dev->platform_data;
-		if (!pdata) {
-			DRM_ERROR("no platform data specified\n");
-			return -EINVAL;
-		}
-	}
-
-	panel = &pdata->panel;
-	if (!panel) {
-		dev_err(dev, "panel is null.\n");
-		return -EINVAL;
-	}
+	if (!dev->of_node)
+		return -ENODEV;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->bus_clk = devm_clk_get(dev, "fimd");
-	if (IS_ERR(ctx->bus_clk)) {
-		dev_err(dev, "failed to get bus clock\n");
-		return PTR_ERR(ctx->bus_clk);
-	}
+	ret = fimd_get_platform_data(ctx, dev);
+	if (ret)
+		return ret;
 
-	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
-	if (IS_ERR(ctx->lcd_clk)) {
-		dev_err(dev, "failed to get lcd clock\n");
-		return PTR_ERR(ctx->lcd_clk);
-	}
+	ret = fimd_configure_clocks(ctx, dev);
+	if (ret)
+		return ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
@@ -952,10 +954,6 @@ static int fimd_probe(struct platform_device *pdev)
 	}
 
 	ctx->driver_data = drm_fimd_get_driver_data(pdev);
-	ctx->vidcon0 = pdata->vidcon0;
-	ctx->vidcon1 = pdata->vidcon1;
-	ctx->default_win = pdata->default_win;
-	ctx->panel = panel;
 	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
 	atomic_set(&ctx->wait_vsync_event, 0);
 
@@ -973,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
 
-	ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
-	panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
-
-	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
-			panel->timing.pixclock, ctx->clkdiv);
-
 	for (win = 0; win < WINDOWS_NR; win++)
 		fimd_clear_win(ctx, win);
 
@@ -1067,20 +1059,6 @@ static int fimd_runtime_resume(struct device *dev)
 }
 #endif
 
-static struct platform_device_id fimd_driver_ids[] = {
-	{
-		.name		= "s3c64xx-fb",
-		.driver_data	= (unsigned long)&s3c64xx_fimd_driver_data,
-	}, {
-		.name		= "exynos4-fb",
-		.driver_data	= (unsigned long)&exynos4_fimd_driver_data,
-	}, {
-		.name		= "exynos5-fb",
-		.driver_data	= (unsigned long)&exynos5_fimd_driver_data,
-	},
-	{},
-};
-
 static const struct dev_pm_ops fimd_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
 	SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1089,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = {
 struct platform_driver fimd_driver = {
 	.probe		= fimd_probe,
 	.remove		= fimd_remove,
-	.id_table       = fimd_driver_ids,
 	.driver		= {
 		.name	= "exynos4-fb",
 		.owner	= THIS_MODULE,
 		.pm	= &fimd_pm_ops,
-		.of_match_table = of_match_ptr(fimd_driver_dt_match),
+		.of_match_table = fimd_driver_dt_match,
 	},
 };
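
To make the clock setup in fimd_configure_clocks() above concrete with hypothetical numbers: for a 1024x600 panel with htotal 1344 and vtotal 638 and no pixelclock given in the device tree, the code first derives pixelclock = 1344 * 638 * 60 ≈ 51.45 MHz. If sclk_fimd happens to run at 133 MHz, DIV_ROUND_UP(133000000, 51448320) yields clkdiv = 3, and the effective pixel clock becomes 133 MHz / 3 ≈ 44.3 MHz; because the divider always rounds up, the achieved frame rate (here about 52 Hz) can end up below the requested 60 Hz. The panel timings and the 133 MHz clock rate are illustrative assumptions, not values from this patch.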

+ 33 - 27
drivers/gpu/drm/exynos/exynos_drm_g2d.c

@@ -23,6 +23,7 @@
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
+#include "exynos_drm_g2d.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
@@ -446,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 	}
 
 	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
-	if (!g2d_userptr) {
-		DRM_ERROR("failed to allocate g2d_userptr.\n");
+	if (!g2d_userptr)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	atomic_set(&g2d_userptr->refcount, 1);
 
@@ -499,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
 		ret = -ENOMEM;
 		goto err_free_userptr;
 	}
@@ -808,17 +806,8 @@ static void g2d_dma_start(struct g2d_data *g2d,
 	int ret;
 
 	ret = pm_runtime_get_sync(g2d->dev);
-	if (ret < 0) {
-		dev_warn(g2d->dev, "failed pm power on.\n");
-		return;
-	}
-
-	ret = clk_prepare_enable(g2d->gate_clk);
-	if (ret < 0) {
-		dev_warn(g2d->dev, "failed to enable clock.\n");
-		pm_runtime_put_sync(g2d->dev);
+	if (ret < 0)
 		return;
-	}
 
 	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
 	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -871,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
 					    runqueue_work);
 
 	mutex_lock(&g2d->runqueue_mutex);
-	clk_disable_unprepare(g2d->gate_clk);
 	pm_runtime_put_sync(g2d->dev);
 
 	complete(&g2d->runqueue_node->complete);
@@ -1096,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 
 		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
 		if (!e) {
-			dev_err(dev, "failed to allocate event\n");
-
 			spin_lock_irqsave(&drm_dev->event_lock, flags);
 			file->event_space += sizeof(e->event);
 			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1327,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 	struct exynos_drm_g2d_private *g2d_priv;
 
 	g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
-	if (!g2d_priv) {
-		dev_err(dev, "failed to allocate g2d private data\n");
+	if (!g2d_priv)
 		return -ENOMEM;
-	}
 
 	g2d_priv->dev = dev;
 	file_priv->g2d_priv = g2d_priv;
@@ -1386,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev)
 	int ret;
 
 	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
-	if (!g2d) {
-		dev_err(dev, "failed to allocate driver data\n");
+	if (!g2d)
 		return -ENOMEM;
-	}
 
 	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
 			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1524,14 +1506,38 @@ static int g2d_resume(struct device *dev)
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int g2d_runtime_suspend(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(g2d->gate_clk);
+
+	return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(g2d->gate_clk);
+	if (ret < 0)
+		dev_warn(dev, "failed to enable clock.\n");
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops g2d_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+	SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+};
 
-#ifdef CONFIG_OF
 static const struct of_device_id exynos_g2d_match[] = {
 	{ .compatible = "samsung,exynos5250-g2d" },
 	{},
 };
-#endif
 
 struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
@@ -1540,6 +1546,6 @@ struct platform_driver g2d_driver = {
 		.name	= "s5p-g2d",
 		.owner	= THIS_MODULE,
 		.pm	= &g2d_pm_ops,
-		.of_match_table = of_match_ptr(exynos_g2d_match),
+		.of_match_table = exynos_g2d_match,
 	},
 };

+ 21 - 32
drivers/gpu/drm/exynos/exynos_drm_gem.c

@@ -10,6 +10,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
 #include <drm/exynos_drm.h>
@@ -17,6 +18,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
 
 static unsigned int convert_to_vm_err_msg(int msg)
 {
@@ -135,7 +137,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	obj = &exynos_gem_obj->base;
 	buf = exynos_gem_obj->buffer;
 
-	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
 	/*
 	 * do not release memory region from exporter.
@@ -152,8 +154,7 @@ out:
 	exynos_drm_fini_buf(obj->dev, buf);
 	exynos_gem_obj->buffer = NULL;
 
-	if (obj->map_list.map)
-		drm_gem_free_mmap_offset(obj);
+	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
 	drm_gem_object_release(obj);
@@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 	int ret;
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
-	if (!exynos_gem_obj) {
-		DRM_ERROR("failed to allocate exynos gem object\n");
+	if (!exynos_gem_obj)
 		return NULL;
-	}
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 
 	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
 						EXYNOS_BO_WC, args->size);
+	/*
+	 * If physically contiguous memory allocation fails and IOMMU is
+	 * supported, fall back to allocating the buffer from a
+	 * non-contiguous memory area.
+	 */
+	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+		dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+		exynos_gem_obj = exynos_drm_gem_create(dev,
+					EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+					args->size);
+	}
+
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
@@ -703,13 +714,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 		goto unlock;
 	}
 
-	if (!obj->map_list.map) {
-		ret = drm_gem_create_mmap_offset(obj);
-		if (ret)
-			goto out;
-	}
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto out;
 
-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
@@ -719,26 +728,6 @@ unlock:
 	return ret;
 }
 
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-				struct drm_device *dev,
-				unsigned int handle)
-{
-	int ret;
-
-	/*
-	 * obj->refcount and obj->handle_count are decreased and
-	 * if both them are 0 then exynos_drm_gem_free_object()
-	 * would be called by callback to release resources.
-	 */
-	ret = drm_gem_handle_delete(file_priv, handle);
-	if (ret < 0) {
-		DRM_ERROR("failed to delete drm_gem_handle.\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
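
The dumb_map_offset change above now takes the fake mmap offset from the object's vma_node instead of the old hashtable key. As a rough sketch of what the helper evaluates to (paraphrased from drm_vma_manager.h of this series; treat the exact field names as an approximation):

static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
	/* byte offset = page-aligned start of the node's drm_mm allocation */
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}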

+ 0 - 9
drivers/gpu/drm/exynos/exynos_drm_gem.h

@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 				   struct drm_device *dev, uint32_t handle,
 				   uint64_t *offset);
 
-/*
- * destroy memory region allocated.
- *	- a gem handle and physical memory region pointed by a gem object
- *	would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-				struct drm_device *dev,
-				unsigned int handle);
-
 /* page fault handler and mmap fault address(virtual) to physical memory. */
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 

+ 2 - 3
drivers/gpu/drm/exynos/exynos_drm_gsc.c

@@ -20,6 +20,7 @@
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "regs-gsc.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_gsc.h"
 
@@ -1337,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->writeback = 1;

+ 1 - 3
drivers/gpu/drm/exynos/exynos_drm_hdmi.c

@@ -403,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
 	struct drm_hdmi_context *ctx;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+	if (!ctx)
 		return -ENOMEM;
-	}
 
 	subdrv = &ctx->subdrv;
 

+ 9 - 0
drivers/gpu/drm/exynos/exynos_drm_iommu.c

@@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
 
 	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 					GFP_KERNEL);
+	if (!dev->dma_parms)
+		goto error;
+
 	dma_set_max_seg_size(dev, 0xffffffffu);
 	dev->archdata.mapping = mapping;
 
 	return 0;
+error:
+	arm_iommu_release_mapping(mapping);
+	return -ENOMEM;
 }
 
 /*
@@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
 					sizeof(*subdrv_dev->dma_parms),
 					GFP_KERNEL);
+	if (!subdrv_dev->dma_parms)
+		return -ENOMEM;
+
 	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
 
 	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);

+ 5 - 17
drivers/gpu/drm/exynos/exynos_drm_ipp.c

@@ -408,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
 	struct drm_exynos_ipp_cmd_work *cmd_work;
 
 	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
-	if (!cmd_work) {
-		DRM_ERROR("failed to alloc cmd_work.\n");
+	if (!cmd_work)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
 
@@ -423,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
 	struct drm_exynos_ipp_event_work *event_work;
 
 	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
-	if (!event_work) {
-		DRM_ERROR("failed to alloc event_work.\n");
+	if (!event_work)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
 
@@ -482,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 
 	/* allocate command node */
 	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
-	if (!c_node) {
-		DRM_ERROR("failed to allocate map node.\n");
+	if (!c_node)
 		return -ENOMEM;
-	}
 
 	/* create property id */
 	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -694,10 +688,8 @@ static struct drm_exynos_ipp_mem_node
 	mutex_lock(&c_node->mem_lock);
 
 	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
-	if (!m_node) {
-		DRM_ERROR("failed to allocate queue node.\n");
+	if (!m_node)
 		goto err_unlock;
-	}
 
 	/* clear base address for error handling */
 	memset(&buf_info, 0x0, sizeof(buf_info));
@@ -798,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
 	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
-
 	if (!e) {
-		DRM_ERROR("failed to allocate event.\n");
 		spin_lock_irqsave(&drm_dev->event_lock, flags);
 		file->event_space += sizeof(e->event);
 		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1780,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
 	struct exynos_drm_ipp_private *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		DRM_ERROR("failed to allocate priv.\n");
+	if (!priv)
 		return -ENOMEM;
-	}
 	priv->dev = dev;
 	file_priv->ipp_priv = priv;
 

+ 2 - 3
drivers/gpu/drm/exynos/exynos_drm_plane.c

@@ -16,6 +16,7 @@
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
 
 #define to_exynos_plane(x)	container_of(x, struct exynos_plane, base)
 
@@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
 	int err;
 
 	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
-	if (!exynos_plane) {
-		DRM_ERROR("failed to allocate plane\n");
+	if (!exynos_plane)
 		return NULL;
-	}
 
 	err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
 			      &exynos_plane_funcs, formats, ARRAY_SIZE(formats),

+ 83 - 34
drivers/gpu/drm/exynos/exynos_drm_rotator.c

@@ -21,6 +21,7 @@
 #include <drm/exynos_drm.h>
 #include "regs-rotator.h"
 #include "exynos_drm.h"
+#include "exynos_drm_drv.h"
 #include "exynos_drm_ipp.h"
 
 /*
@@ -471,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
 	struct drm_exynos_ipp_prop_list *prop_list;
 
 	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
-	if (!prop_list) {
-		DRM_ERROR("failed to alloc property list.\n");
+	if (!prop_list)
 		return -ENOMEM;
-	}
 
 	prop_list->version = 1;
 	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -631,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
 	return 0;
 }
 
+static struct rot_limit_table rot_limit_tbl_4210 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_64K,
+		.max_h = SZ_64K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_16K,
+		.max_h = SZ_16K,
+		.align = 2,
+	},
+};
+
+static struct rot_limit_table rot_limit_tbl_4x12 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 2,
+	},
+};
+
+static struct rot_limit_table rot_limit_tbl_5250 = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 1,
+	},
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+	{
+		.compatible = "samsung,exynos4210-rotator",
+		.data = &rot_limit_tbl_4210,
+	},
+	{
+		.compatible = "samsung,exynos4212-rotator",
+		.data = &rot_limit_tbl_4x12,
+	},
+	{
+		.compatible = "samsung,exynos5250-rotator",
+		.data = &rot_limit_tbl_5250,
+	},
+	{},
+};
+
 static int rotator_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rot_context *rot;
 	struct exynos_drm_ippdrv *ippdrv;
+	const struct of_device_id *match;
 	int ret;
 
+	if (!dev->of_node) {
+		dev_err(dev, "cannot find of_node.\n");
+		return -ENODEV;
+	}
+
 	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
-	if (!rot) {
-		dev_err(dev, "failed to allocate rot\n");
+	if (!rot)
 		return -ENOMEM;
-	}
 
-	rot->limit_tbl = (struct rot_limit_table *)
-				platform_get_device_id(pdev)->driver_data;
+	match = of_match_node(exynos_rotator_match, dev->of_node);
+	if (!match) {
+		dev_err(dev, "failed to match node\n");
+		return -ENODEV;
+	}
+	rot->limit_tbl = (struct rot_limit_table *)match->data;
 
 	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -717,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct rot_limit_table rot_limit_tbl = {
-	.ycbcr420_2p = {
-		.min_w = 32,
-		.min_h = 32,
-		.max_w = SZ_32K,
-		.max_h = SZ_32K,
-		.align = 3,
-	},
-	.rgb888 = {
-		.min_w = 8,
-		.min_h = 8,
-		.max_w = SZ_8K,
-		.max_h = SZ_8K,
-		.align = 2,
-	},
-};
-
-static struct platform_device_id rotator_driver_ids[] = {
-	{
-		.name		= "exynos-rot",
-		.driver_data	= (unsigned long)&rot_limit_tbl,
-	},
-	{},
-};
-
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
 	if (enable) {
@@ -803,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = {
 struct platform_driver rotator_driver = {
 	.probe		= rotator_probe,
 	.remove		= rotator_remove,
-	.id_table	= rotator_driver_ids,
 	.driver		= {
 		.name	= "exynos-rot",
 		.owner	= THIS_MODULE,
 		.pm	= &rotator_pm_ops,
+		.of_match_table = exynos_rotator_match,
 	},
 };

+ 1 - 0
drivers/gpu/drm/exynos/exynos_drm_vidi.c

@@ -23,6 +23,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_vidi.h"
 
 /* vidi has totally three virtual windows. */
 #define WINDOWS_NR		3

+ 17 - 70
drivers/gpu/drm/exynos/exynos_hdmi.c

@@ -32,6 +32,7 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/of_gpio.h>
 
 #include <drm/exynos_drm.h>
@@ -1824,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
 
 	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
 		sizeof(res->regul_bulk[0]), GFP_KERNEL);
-	if (!res->regul_bulk) {
-		DRM_ERROR("failed to get memory for regulators\n");
+	if (!res->regul_bulk)
 		goto fail;
-	}
 	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
 		res->regul_bulk[i].supply = supply[i];
 		res->regul_bulk[i].consumer = NULL;
@@ -1859,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
 		hdmi_hdmiphy = hdmiphy;
 }
 
-#ifdef CONFIG_OF
 static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
 					(struct device *dev)
 {
@@ -1868,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
 	u32 value;
 
 	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-	if (!pd) {
-		DRM_ERROR("memory allocation for pdata failed\n");
+	if (!pd)
 		goto err_data;
-	}
 
 	if (!of_find_property(np, "hpd-gpio", &value)) {
 		DRM_ERROR("no hpd gpio property found\n");
@@ -1885,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
 err_data:
 	return NULL;
 }
-#else
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
-					(struct device *dev)
-{
-	return NULL;
-}
-#endif
-
-static struct platform_device_id hdmi_driver_types[] = {
-	{
-		.name		= "s5pv210-hdmi",
-		.driver_data    = HDMI_TYPE13,
-	}, {
-		.name		= "exynos4-hdmi",
-		.driver_data    = HDMI_TYPE13,
-	}, {
-		.name		= "exynos4-hdmi14",
-		.driver_data	= HDMI_TYPE14,
-	}, {
-		.name		= "exynos5-hdmi",
-		.driver_data	= HDMI_TYPE14,
-	}, {
-		/* end node */
-	}
-};
 
-#ifdef CONFIG_OF
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -1923,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = {
 		/* end node */
 	}
 };
-#endif
 
 static int hdmi_probe(struct platform_device *pdev)
 {
@@ -1932,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev)
 	struct hdmi_context *hdata;
 	struct s5p_hdmi_platform_data *pdata;
 	struct resource *res;
+	const struct of_device_id *match;
 	int ret;
 
-	if (dev->of_node) {
-		pdata = drm_hdmi_dt_parse_pdata(dev);
-		if (IS_ERR(pdata)) {
-			DRM_ERROR("failed to parse dt\n");
-			return PTR_ERR(pdata);
-		}
-	} else {
-		pdata = dev->platform_data;
-	}
+	if (!dev->of_node)
+		return -ENODEV;
 
-	if (!pdata) {
-		DRM_ERROR("no platform data specified\n");
+	pdata = drm_hdmi_dt_parse_pdata(dev);
+	if (!pdata)
 		return -EINVAL;
-	}
 
-	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
-								GFP_KERNEL);
-	if (!drm_hdmi_ctx) {
-		DRM_ERROR("failed to allocate common hdmi context.\n");
+	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+	if (!drm_hdmi_ctx)
 		return -ENOMEM;
-	}
 
-	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
-								GFP_KERNEL);
-	if (!hdata) {
-		DRM_ERROR("out of memory\n");
+	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
+	if (!hdata)
 		return -ENOMEM;
-	}
 
 	mutex_init(&hdata->hdmi_mutex);
 
@@ -1970,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, drm_hdmi_ctx);
 
-	if (dev->of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(of_match_ptr(hdmi_match_types),
-					dev->of_node);
-		if (match == NULL)
-			return -ENODEV;
-		hdata->type = (enum hdmi_type)match->data;
-	} else {
-		hdata->type = (enum hdmi_type)platform_get_device_id
-					(pdev)->driver_data;
-	}
+	match = of_match_node(hdmi_match_types, dev->of_node);
+	if (!match)
+		return -ENODEV;
+	hdata->type = (enum hdmi_type)match->data;
 
 	hdata->hpd_gpio = pdata->hpd_gpio;
 	hdata->dev = dev;
 
 	ret = hdmi_resources_init(hdata);
-
 	if (ret) {
 		DRM_ERROR("hdmi_resources_init failed\n");
 		return -EINVAL;
@@ -2141,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = {
 struct platform_driver hdmi_driver = {
 	.probe		= hdmi_probe,
 	.remove		= hdmi_remove,
-	.id_table = hdmi_driver_types,
 	.driver		= {
 		.name	= "exynos-hdmi",
 		.owner	= THIS_MODULE,
 		.pm	= &hdmi_pm_ops,
-		.of_match_table = of_match_ptr(hdmi_match_types),
+		.of_match_table = hdmi_match_types,
 	},
 };

+ 2 - 10
drivers/gpu/drm/exynos/exynos_hdmiphy.c

@@ -15,6 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
@@ -39,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client)
 	return 0;
 }
 
-static const struct i2c_device_id hdmiphy_id[] = {
-	{ "s5p_hdmiphy", 0 },
-	{ "exynos5-hdmiphy", 0 },
-	{ },
-};
-
-#ifdef CONFIG_OF
 static struct of_device_id hdmiphy_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmiphy",
@@ -57,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = {
 		/* end node */
 	}
 };
-#endif
 
 struct i2c_driver hdmiphy_driver = {
 	.driver = {
 		.name	= "exynos-hdmiphy",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(hdmiphy_match_types),
+		.of_match_table = hdmiphy_match_types,
 	},
-	.id_table = hdmiphy_id,
 	.probe		= hdmiphy_probe,
 	.remove		= hdmiphy_remove,
 	.command		= NULL,

+ 3 - 6
drivers/gpu/drm/exynos/exynos_mixer.c

@@ -30,6 +30,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <linux/of.h>
 
 #include <drm/exynos_drm.h>
 
@@ -1185,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev)
 
 	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
 								GFP_KERNEL);
-	if (!drm_hdmi_ctx) {
-		DRM_ERROR("failed to allocate common hdmi context.\n");
+	if (!drm_hdmi_ctx)
 		return -ENOMEM;
-	}
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		DRM_ERROR("failed to alloc mixer context.\n");
+	if (!ctx)
 		return -ENOMEM;
-	}
 
 	mutex_init(&ctx->mixer_mutex);
 

+ 1 - 0
drivers/gpu/drm/gma500/Makefile

@@ -15,6 +15,7 @@ gma500_gfx-y += \
 	  mmu.o \
 	  power.o \
 	  psb_drv.o \
+	  gma_display.o \
 	  psb_intel_display.o \
 	  psb_intel_lvds.o \
 	  psb_intel_modes.o \

+ 3 - 0
drivers/gpu/drm/gma500/cdv_device.c

@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
 
 	.crtc_helper = &cdv_intel_helper_funcs,
 	.crtc_funcs = &cdv_intel_crtc_funcs,
+	.clock_funcs = &cdv_clock_funcs,
 
 	.output_init = cdv_output_init,
 	.hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
 	.restore_regs = cdv_restore_display_registers,
 	.power_down = cdv_power_down,
 	.power_up = cdv_power_up,
+	.update_wm = cdv_update_wm,
+	.disable_sr = cdv_disable_sr,
 };

+ 3 - 9
drivers/gpu/drm/gma500/cdv_device.h

@@ -17,6 +17,7 @@
 
 extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
 extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern const struct gma_clock_funcs cdv_clock_funcs;
 extern void cdv_intel_crt_init(struct drm_device *dev,
 			struct psb_intel_mode_device *mode_dev);
 extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
 			int reg);
 extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
 					     struct drm_crtc *crtc);
-
-static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
-{
-	/* Wait for 20ms, i.e. one cycle at 50hz. */
-        /* FIXME: msleep ?? */
-	mdelay(20);
-}
-
-
+extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
+extern void cdv_disable_sr(struct drm_device *dev);

+ 27 - 30
drivers/gpu/drm/gma500/cdv_intel_crt.c

@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
 
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
-	struct psb_intel_crtc *psb_intel_crtc =
-					to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	int dpll_md_reg;
 	u32 adpa, dpll_md;
 	u32 adpa_reg;
 
-	if (psb_intel_crtc->pipe == 0)
+	if (gma_crtc->pipe == 0)
 		dpll_md_reg = DPLL_A_MD;
 	else
 		dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
-	if (psb_intel_crtc->pipe == 0)
+	if (gma_crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
 	else
 		adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
 
 static void cdv_intel_crt_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
-	psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+	psb_intel_i2c_destroy(gma_encoder->ddc_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
 
 static int cdv_intel_crt_get_modes(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-				psb_intel_attached_encoder(connector);
-	return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	return psb_intel_ddc_get_modes(connector,
+				       &gma_encoder->ddc_bus->adapter);
 }
 
 static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
 static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
 	.dpms = cdv_intel_crt_dpms,
 	.mode_fixup = cdv_intel_crt_mode_fixup,
-	.prepare = psb_intel_encoder_prepare,
-	.commit = psb_intel_encoder_commit,
+	.prepare = gma_encoder_prepare,
+	.commit = gma_encoder_commit,
 	.mode_set = cdv_intel_crt_mode_set,
 };
 
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
 				cdv_intel_crt_connector_helper_funcs = {
 	.mode_valid = cdv_intel_crt_mode_valid,
 	.get_modes = cdv_intel_crt_get_modes,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
 			struct psb_intel_mode_device *mode_dev)
 {
 
-	struct psb_intel_connector *psb_intel_connector;
-	struct psb_intel_encoder *psb_intel_encoder;
+	struct gma_connector *gma_connector;
+	struct gma_encoder *gma_encoder;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 
 	u32 i2c_reg;
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
-	if (!psb_intel_encoder)
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+	if (!gma_encoder)
 		return;
 
-	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
-	if (!psb_intel_connector)
+	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+	if (!gma_connector)
 		goto failed_connector;
 
-	connector = &psb_intel_connector->base;
+	connector = &gma_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	drm_connector_init(dev, connector,
 		&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-	encoder = &psb_intel_encoder->base;
+	encoder = &gma_encoder->base;
 	drm_encoder_init(dev, encoder,
 		&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
 
 	/* Set up the DDC bus. */
 	i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
 	if (dev_priv->crt_ddc_bus != 0)
 		i2c_reg = dev_priv->crt_ddc_bus;
 	}*/
-	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+	gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
 							  i2c_reg, "CRTDDC_A");
-	if (!psb_intel_encoder->ddc_bus) {
+	if (!gma_encoder->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
 			   "failed.\n");
 		goto failed_ddc;
 	}
 
-	psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+	gma_encoder->type = INTEL_OUTPUT_ANALOG;
 	/*
 	psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
 	psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
 
 	return;
 failed_ddc:
-	drm_encoder_cleanup(&psb_intel_encoder->base);
-	drm_connector_cleanup(&psb_intel_connector->base);
-	kfree(psb_intel_connector);
+	drm_encoder_cleanup(&gma_encoder->base);
+	drm_connector_cleanup(&gma_connector->base);
+	kfree(gma_connector);
 failed_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 	return;
 }

File diff suppressed because it is too large
+ 57 - 799
drivers/gpu/drm/gma500/cdv_intel_display.c


+ 77 - 77
drivers/gpu/drm/gma500/cdv_intel_dp.c

@@ -34,6 +34,7 @@
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
 #include "psb_intel_reg.h"
+#include "gma_display.h"
 #include <drm/drm_dp_helper.h>
 
 #define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
 	uint8_t link_bw;
 	uint8_t lane_count;
 	uint8_t dpcd[4];
-	struct psb_intel_encoder *encoder;
+	struct gma_encoder *encoder;
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
 	uint8_t	train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
  * If a CPU or PCH DP output is attached to an eDP panel, this function
  * will return true, and false otherwise.
  */
-static bool is_edp(struct psb_intel_encoder *encoder)
+static bool is_edp(struct gma_encoder *encoder)
 {
 	return encoder->type == INTEL_OUTPUT_EDP;
 }
 
 
-static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
 
 static int
-cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
 }
 
 static int
-cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 	return (max_link_clock * max_lanes * 19) / 20;
 }
 
-static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
 	msleep(intel_dp->panel_power_up_delay);
 }
 
-static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
 }
 
 /* Returns true if the panel was already on when called */
-static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
 	return false;
 }
 
-static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	u32 pp, idle_off_mask = PP_ON ;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
 	DRM_DEBUG_KMS("Over\n");
 }
 
-static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
 	gma_backlight_enable(dev);
 }
 
-static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
 cdv_intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
-	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct gma_encoder *encoder = gma_attached_encoder(connector);
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
 	int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 }
 
 static int
-cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
 		uint8_t *send, int send_bytes,
 		uint8_t *recv, int recv_size)
 {
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
 
 /* Write data to the aux channel in native mode */
 static int
-cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
 			  uint16_t address, uint8_t *send, int send_bytes)
 {
 	int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
 
 /* Write a single byte to the aux channel in native mode */
 static int
-cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
 			    uint16_t address, uint8_t byte)
 {
 	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
 
 /* read bytes from a native aux channel */
 static int
-cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
 			 uint16_t address, uint8_t *recv, int recv_bytes)
 {
 	uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	struct cdv_intel_dp *intel_dp = container_of(adapter,
 						struct cdv_intel_dp,
 						adapter);
-	struct psb_intel_encoder *encoder = intel_dp->encoder;
+	struct gma_encoder *encoder = intel_dp->encoder;
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 }
 
 static int
-cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+cdv_intel_dp_i2c_init(struct gma_connector *connector,
+		      struct gma_encoder *encoder, const char *name)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
 		    struct drm_display_mode *adjusted_mode)
 {
 	struct drm_psb_private *dev_priv = encoder->dev->dev_private;
-	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
 	int lane_count, clock;
 	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_encoder *encoder;
-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	int lane_count = 4, bpp = 24;
 	struct cdv_intel_dp_m_n m_n;
-	int pipe = intel_crtc->pipe;
+	int pipe = gma_crtc->pipe;
 
 	/*
 	 * Find the lane count in the intel_encoder private
 	 */
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-		struct psb_intel_encoder *intel_encoder;
+		struct gma_encoder *intel_encoder;
 		struct cdv_intel_dp *intel_dp;
 
 		if (encoder->crtc != crtc)
 			continue;
 
-		intel_encoder = to_psb_intel_encoder(encoder);
+		intel_encoder = to_gma_encoder(encoder);
 		intel_dp = intel_encoder->dev_priv;
 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
 			lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
 cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
 {
-	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
 	struct drm_crtc *crtc = encoder->crtc;
-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
 	struct drm_device *dev = encoder->dev;
 
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
 	}
 
 	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
-	if (intel_crtc->pipe == 1)
+	if (gma_crtc->pipe == 1)
 		intel_dp->DP |= DP_PIPEB_SELECT;
 
 	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
 		else
 			pfit_control = 0;
 
-		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+		pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
 
 		REG_WRITE(PFIT_CONTROL, pfit_control);
 	}
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
 
 
 /* If the sink supports it, try to set the power state appropriately */
-static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
 
 static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
 {
-	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
 	int edp = is_edp(intel_encoder);
 
 	if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
 
 static void cdv_intel_dp_commit(struct drm_encoder *encoder)
 {
-	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
 	int edp = is_edp(intel_encoder);
 
 	if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
 static void
 cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
 	struct drm_device *dev = encoder->dev;
 	uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
  * cases where the sink may still be asleep.
  */
 static bool
-cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
 			       uint8_t *recv, int recv_bytes)
 {
 	int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
  * link status information
  */
 static bool
-cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 */
 static void
-cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+cdv_intel_get_adjust_train(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
 			 DP_LANE_CHANNEL_EQ_DONE|\
 			 DP_LANE_SYMBOL_LOCKED)
 static bool
-cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
 }
 
 static bool
-cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat)
 {
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
 
 
 static bool
-cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+cdv_intel_dplink_set_level(struct gma_encoder *encoder,
 			uint8_t dp_train_pat)
 {
 	
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
 }
 
 static void
-cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
 
 /* Enable corresponding port and start training pattern 1 */
 static void
-cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
 	/* Enable output, wait for it to become active */
 	REG_WRITE(intel_dp->output_reg, reg);
 	REG_READ(intel_dp->output_reg);
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 	DRM_DEBUG_KMS("Link config\n");
 	/* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
 }
 
 static void
-cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
 }
 
 static void
-cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+cdv_intel_dp_link_down(struct gma_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
 	REG_READ(intel_dp->output_reg);
 }
 
-static enum drm_connector_status
-cdv_dp_detect(struct psb_intel_encoder *encoder)
+static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
 {
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
 static enum drm_connector_status
 cdv_intel_dp_detect(struct drm_connector *connector, bool force)
 {
-	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct gma_encoder *encoder = gma_attached_encoder(connector);
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	enum drm_connector_status status;
 	struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
 
 static int cdv_intel_dp_get_modes(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+	struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
 	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
 	struct edid *edid = NULL;
 	int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
 static bool
 cdv_intel_dp_detect_audio(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct gma_encoder *encoder = gma_attached_encoder(connector);
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	struct edid *edid;
 	bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
 		      uint64_t val)
 {
 	struct drm_psb_private *dev_priv = connector->dev->dev_private;
-	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct gma_encoder *encoder = gma_attached_encoder(connector);
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int ret;
 
@@ -1700,11 +1701,10 @@ done:
 static void
 cdv_intel_dp_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
 
-	if (is_edp(psb_intel_encoder)) {
+	if (is_edp(gma_encoder)) {
 	/*	cdv_intel_panel_destroy_backlight(connector->dev); */
 		if (intel_dp->panel_fixed_mode) {
 			kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
 static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
 	.get_modes = cdv_intel_dp_get_modes,
 	.mode_valid = cdv_intel_dp_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
 void
 cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct cdv_intel_dp *intel_dp;
 	const char *name = NULL;
 	int type = DRM_MODE_CONNECTOR_DisplayPort;
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
-	if (!psb_intel_encoder)
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+	if (!gma_encoder)
 		return;
-        psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
-        if (!psb_intel_connector)
+        gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+        if (!gma_connector)
                 goto err_connector;
 	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
 	if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
 	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
 		type = DRM_MODE_CONNECTOR_eDP;
 
-	connector = &psb_intel_connector->base;
-	encoder = &psb_intel_encoder->base;
+	connector = &gma_connector->base;
+	encoder = &gma_encoder->base;
 
 	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
 	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
 
 	if (type == DRM_MODE_CONNECTOR_DisplayPort)
-        	psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
         else
-		psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+		gma_encoder->type = INTEL_OUTPUT_EDP;
 
 
-	psb_intel_encoder->dev_priv=intel_dp;
-	intel_dp->encoder = psb_intel_encoder;
+	gma_encoder->dev_priv=intel_dp;
+	intel_dp->encoder = gma_encoder;
 	intel_dp->output_reg = output_reg;
 	
 	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
 	switch (output_reg) {
 		case DP_B:
 			name = "DPDDC-B";
-			psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+			gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
 			break;
 		case DP_C:
 			name = "DPDDC-C";
-			psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+			gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
 			break;
 	}
 
 	cdv_disable_intel_clock_gating(dev);
 
-	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+	cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
         /* FIXME:fail check */
 	cdv_intel_dp_add_properties(connector);
 
-	if (is_edp(psb_intel_encoder)) {
+	if (is_edp(gma_encoder)) {
 		int ret;
 		struct edp_power_seq cur;
                 u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
                               intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
 
-		cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
-		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+		cdv_intel_edp_panel_vdd_on(gma_encoder);
+		ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
 					       intel_dp->dpcd,
 					       sizeof(intel_dp->dpcd));
-		cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		cdv_intel_edp_panel_vdd_off(gma_encoder);
 		if (ret == 0) {
 			/* if this fails, presume the device is a ghost */
 			DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
 	return;
 
 err_priv:
-	kfree(psb_intel_connector);
+	kfree(gma_connector);
 err_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
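
The comment preserved in the hunk above ("cases where the sink may still be asleep") is why cdv_intel_dp_aux_native_read_retry exists: the first DPCD reads after waking a panel can fail, so the driver retries before giving up on the sink. A stand-alone sketch of that retry shape; the attempt count, the address and the stubbed read are illustrative, not taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an AUX native read; returns bytes read or a negative error. */
static int aux_native_read(uint16_t address, uint8_t *recv, int recv_bytes)
{
	(void)address;
	for (int i = 0; i < recv_bytes; i++)
		recv[i] = 0;
	return recv_bytes;
}

/* Retry a read a few times: a sleeping sink may reject the first attempts. */
static bool read_retry(uint16_t address, uint8_t *recv, int recv_bytes)
{
	for (int attempt = 0; attempt < 3; attempt++) {
		int ret = aux_native_read(address, recv, recv_bytes);
		if (ret == recv_bytes)
			return true;
		/* a real driver would wait briefly here before retrying */
	}
	return false;
}

int main(void)
{
	uint8_t status[6];

	printf("link status read %s\n",
	       read_retry(0x0202, status, sizeof(status)) ? "ok" : "failed");
	return 0;
}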

+ 40 - 49
drivers/gpu/drm/gma500/cdv_intel_hdmi.c

@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
 			struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
 	u32 hdmib;
 	struct drm_crtc *crtc = encoder->crtc;
-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 
 	hdmib = (2 << 10);
 
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
 
-	if (intel_crtc->pipe == 1)
+	if (gma_crtc->pipe == 1)
 		hdmib |= HDMIB_PIPE_B_SELECT;
 
 	if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
 static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-						to_psb_intel_encoder(encoder);
-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
 	u32 hdmib;
 
 	hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
 static void cdv_hdmi_save(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
 
 	hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
 }
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
 static void cdv_hdmi_restore(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
 
 	REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
 	REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
 static enum drm_connector_status cdv_hdmi_detect(
 				struct drm_connector *connector, bool force)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
 
-	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+	edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
 
 	hdmi_priv->has_hdmi_sink = false;
 	hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
 	struct drm_encoder *encoder = connector->encoder;
 
 	if (!strcmp(property->name, "scaling mode") && encoder) {
-		struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
 		bool centre;
 		uint64_t curValue;
 
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
  */
 static int cdv_hdmi_get_modes(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 	struct edid *edid = NULL;
 	int ret = 0;
 
-	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+	edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
 
 static void cdv_hdmi_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
-	if (psb_intel_encoder->i2c_bus)
-		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+	if (gma_encoder->i2c_bus)
+		psb_intel_i2c_destroy(gma_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
 static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
 	.dpms = cdv_hdmi_dpms,
 	.mode_fixup = cdv_hdmi_mode_fixup,
-	.prepare = psb_intel_encoder_prepare,
+	.prepare = gma_encoder_prepare,
 	.mode_set = cdv_hdmi_mode_set,
-	.commit = psb_intel_encoder_commit,
+	.commit = gma_encoder_commit,
 };
 
 static const struct drm_connector_helper_funcs
 					cdv_hdmi_connector_helper_funcs = {
 	.get_modes = cdv_hdmi_get_modes,
 	.mode_valid = cdv_hdmi_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
 void cdv_hdmi_init(struct drm_device *dev,
 			struct psb_intel_mode_device *mode_dev, int reg)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct mid_intel_hdmi_priv *hdmi_priv;
 	int ddc_bus;
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
-				    GFP_KERNEL);
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
 
-	if (!psb_intel_encoder)
+	if (!gma_encoder)
 		return;
 
-	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+	gma_connector = kzalloc(sizeof(struct gma_connector),
 				      GFP_KERNEL);
 
-	if (!psb_intel_connector)
+	if (!gma_connector)
 		goto err_connector;
 
 	hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
 	if (!hdmi_priv)
 		goto err_priv;
 
-	connector = &psb_intel_connector->base;
+	connector = &gma_connector->base;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
-	encoder = &psb_intel_encoder->base;
+	encoder = &gma_encoder->base;
 	drm_connector_init(dev, connector,
 			   &cdv_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
 	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
-	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
+	gma_encoder->type = INTEL_OUTPUT_HDMI;
 	hdmi_priv->hdmi_reg = reg;
 	hdmi_priv->has_hdmi_sink = false;
-	psb_intel_encoder->dev_priv = hdmi_priv;
+	gma_encoder->dev_priv = hdmi_priv;
 
 	drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
 	drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
 	switch (reg) {
 	case SDVOB:
 		ddc_bus = GPIOE;
-		psb_intel_encoder->ddi_select = DDI0_SELECT;
+		gma_encoder->ddi_select = DDI0_SELECT;
 		break;
 	case SDVOC:
 		ddc_bus = GPIOD;
-		psb_intel_encoder->ddi_select = DDI1_SELECT;
+		gma_encoder->ddi_select = DDI1_SELECT;
 		break;
 	default:
 		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
 		break;
 	}
 
-	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+	gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
 				ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
 
-	if (!psb_intel_encoder->i2c_bus) {
+	if (!gma_encoder->i2c_bus) {
 		dev_err(dev->dev, "No ddc adapter available!\n");
 		goto failed_ddc;
 	}
 
-	hdmi_priv->hdmi_i2c_adapter =
-				&(psb_intel_encoder->i2c_bus->adapter);
+	hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
 	hdmi_priv->dev = dev;
 	drm_sysfs_connector_add(connector);
 	return;
@@ -379,7 +370,7 @@ failed_ddc:
 	drm_encoder_cleanup(encoder);
 	drm_connector_cleanup(connector);
 err_priv:
-	kfree(psb_intel_connector);
+	kfree(gma_connector);
 err_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
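
In cdv_hdmi_mode_set above, the port control word is built by OR-ing flags into a fixed base value: a sync-polarity bit taken from the adjusted mode, plus a pipe-select bit when the CRTC behind the encoder is pipe 1 (the gma_crtc->pipe check that the rename touches). A toy version of that composition, with made-up bit positions rather than the real HDMIB register layout:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit definitions, illustrative only, not the HDMIB layout. */
#define HSYNC_ACTIVE_HIGH	(1u << 3)
#define PIPE_B_SELECT		(1u << 30)

static uint32_t compose_port_ctl(int hsync_positive, int pipe)
{
	uint32_t ctl = 2u << 10;	/* base value, as in the driver */

	if (hsync_positive)
		ctl |= HSYNC_ACTIVE_HIGH;	/* mode has positive hsync */
	if (pipe == 1)
		ctl |= PIPE_B_SELECT;		/* route the port to pipe B */
	return ctl;
}

int main(void)
{
	printf("port ctl = 0x%08x\n", compose_port_ctl(1, 1));
	return 0;
}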

+ 33 - 38
drivers/gpu/drm/gma500/cdv_intel_lvds.c

@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
-							encoder->crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
 	u32 pfit_control;
 
 	/*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
 	else
 		pfit_control = 0;
 
-	pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+	pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
 
 	if (dev_priv->lvds_dither)
 		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
 	int ret;
 
-	ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+	ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
 
 	if (ret)
 		return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
  */
 static void cdv_intel_lvds_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 
-	if (psb_intel_encoder->i2c_bus)
-		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+	if (gma_encoder->i2c_bus)
+		psb_intel_i2c_destroy(gma_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 	struct drm_encoder *encoder = connector->encoder;
 
 	if (!strcmp(property->name, "scaling mode") && encoder) {
-		struct psb_intel_crtc *crtc =
-					to_psb_intel_crtc(encoder->crtc);
+		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
 		uint64_t curValue;
 
 		if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
 				cdv_intel_lvds_connector_helper_funcs = {
 	.get_modes = cdv_intel_lvds_get_modes,
 	.mode_valid = cdv_intel_lvds_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
 void cdv_intel_lvds_init(struct drm_device *dev,
 		     struct psb_intel_mode_device *mode_dev)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct cdv_intel_lvds_priv *lvds_priv;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 		return;
 	}
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+	gma_encoder = kzalloc(sizeof(struct gma_encoder),
 				    GFP_KERNEL);
-	if (!psb_intel_encoder)
+	if (!gma_encoder)
 		return;
 
-	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+	gma_connector = kzalloc(sizeof(struct gma_connector),
 				      GFP_KERNEL);
-	if (!psb_intel_connector)
+	if (!gma_connector)
 		goto failed_connector;
 
 	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
 	if (!lvds_priv)
 		goto failed_lvds_priv;
 
-	psb_intel_encoder->dev_priv = lvds_priv;
+	gma_encoder->dev_priv = lvds_priv;
 
-	connector = &psb_intel_connector->base;
-	encoder = &psb_intel_encoder->base;
+	connector = &gma_connector->base;
+	encoder = &gma_encoder->base;
 
 
 	drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 			 DRM_MODE_ENCODER_LVDS);
 
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
-	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
+	gma_encoder->type = INTEL_OUTPUT_LVDS;
 
 	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	 * Set up I2C bus
 	 * FIXME: distroy i2c_bus when exit
 	 */
-	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+	gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
 							 GPIOB,
 							 "LVDSBLC_B");
-	if (!psb_intel_encoder->i2c_bus) {
+	if (!gma_encoder->i2c_bus) {
 		dev_printk(KERN_ERR,
 			&dev->pdev->dev, "I2C bus registration failed.\n");
 		goto failed_blc_i2c;
 	}
-	psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
-	dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+	gma_encoder->i2c_bus->slave_addr = 0x2C;
+	dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
 
 	/*
 	 * LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	 */
 
 	/* Set up the DDC bus. */
-	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+	gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
 							 GPIOC,
 							 "LVDSDDC_C");
-	if (!psb_intel_encoder->ddc_bus) {
+	if (!gma_encoder->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev,
 			   "DDC bus registration " "failed.\n");
 		goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	 * preferred mode is the right one.
 	 */
 	psb_intel_ddc_get_modes(connector,
-				&psb_intel_encoder->ddc_bus->adapter);
+				&gma_encoder->ddc_bus->adapter);
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
 
 failed_find:
 	printk(KERN_ERR "Failed find\n");
-	if (psb_intel_encoder->ddc_bus)
-		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+	if (gma_encoder->ddc_bus)
+		psb_intel_i2c_destroy(gma_encoder->ddc_bus);
 failed_ddc:
 	printk(KERN_ERR "Failed DDC\n");
-	if (psb_intel_encoder->i2c_bus)
-		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+	if (gma_encoder->i2c_bus)
+		psb_intel_i2c_destroy(gma_encoder->i2c_bus);
 failed_blc_i2c:
 	printk(KERN_ERR "Failed BLC\n");
 	drm_encoder_cleanup(encoder);
 	drm_connector_cleanup(connector);
 	kfree(lvds_priv);
 failed_lvds_priv:
-	kfree(psb_intel_connector);
+	kfree(gma_connector);
 failed_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
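
After the DDC probe, cdv_intel_lvds_init walks connector->probed_modes and adopts the mode flagged as preferred as the panel's fixed mode. The same selection, reduced to a stand-alone loop over an array; the flag value and the mode list are placeholders:

#include <stdio.h>

#define MODE_TYPE_PREFERRED	(1u << 3)	/* placeholder flag value */

struct mode {
	const char *name;
	unsigned int type;
};

/* Return the first mode marked preferred, or NULL, mirroring the scan
 * over connector->probed_modes in the LVDS init above. */
static const struct mode *find_preferred(const struct mode *modes, int n)
{
	for (int i = 0; i < n; i++)
		if (modes[i].type & MODE_TYPE_PREFERRED)
			return &modes[i];
	return NULL;
}

int main(void)
{
	const struct mode modes[] = {
		{ "1024x768", 0 },
		{ "1366x768", MODE_TYPE_PREFERRED },
	};
	const struct mode *m = find_preferred(modes, 2);

	printf("panel fixed mode: %s\n", m ? m->name : "none");
	return 0;
}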

+ 14 - 17
drivers/gpu/drm/gma500/framebuffer.c

@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
 	/* Begin by trying to use stolen memory backing */
 	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
 	if (backing) {
-		if (drm_gem_private_object_init(dev,
-					&backing->gem, aligned_size) == 0)
-			return backing;
-		psb_gtt_free_range(dev, backing);
+		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+		return backing;
 	}
 	return NULL;
 }
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
 static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 							u16 blue, int regno)
 {
-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 
-	intel_crtc->lut_r[regno] = red >> 8;
-	intel_crtc->lut_g[regno] = green >> 8;
-	intel_crtc->lut_b[regno] = blue >> 8;
+	gma_crtc->lut_r[regno] = red >> 8;
+	gma_crtc->lut_g[regno] = green >> 8;
+	gma_crtc->lut_b[regno] = blue >> 8;
 }
 
 static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
 					u16 *green, u16 *blue, int regno)
 {
-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 
-	*red = intel_crtc->lut_r[regno] << 8;
-	*green = intel_crtc->lut_g[regno] << 8;
-	*blue = intel_crtc->lut_b[regno] << 8;
+	*red = gma_crtc->lut_r[regno] << 8;
+	*green = gma_crtc->lut_g[regno] << 8;
+	*blue = gma_crtc->lut_b[regno] << 8;
 }
 
 static int psbfb_probe(struct drm_fb_helper *helper,
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    head) {
-		struct psb_intel_encoder *psb_intel_encoder =
-			psb_intel_attached_encoder(connector);
-		struct drm_encoder *encoder = &psb_intel_encoder->base;
+		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+		struct drm_encoder *encoder = &gma_encoder->base;
 		int crtc_mask = 0, clone_mask = 0;
 
 		/* valid crtcs */
-		switch (psb_intel_encoder->type) {
+		switch (gma_encoder->type) {
 		case INTEL_OUTPUT_ANALOG:
 			crtc_mask = (1 << 0);
 			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
 		}
 		encoder->possible_crtcs = crtc_mask;
 		encoder->possible_clones =
-		    psb_intel_connector_clones(dev, clone_mask);
+		    gma_connector_clones(dev, clone_mask);
 	}
 }
 

+ 1 - 1
drivers/gpu/drm/gma500/framebuffer.h

@@ -41,7 +41,7 @@ struct psb_fbdev {
 
 #define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
 
-extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+extern int gma_connector_clones(struct drm_device *dev, int type_mask);
 
 #endif
 

+ 10 - 29
drivers/gpu/drm/gma500/gem.c

@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
 int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
 
 	/* Remove the list map if one is present */
-	if (obj->map_list.map)
-		drm_gem_free_mmap_offset(obj);
+	drm_gem_free_mmap_offset(obj);
 	drm_gem_object_release(obj);
 
 	/* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 	/* What validation is needed here ? */
 
 	/* Make it mmapable */
-	if (!obj->map_list.map) {
-		ret = drm_gem_create_mmap_offset(obj);
-		if (ret)
-			goto out;
-	}
-	/* GEM should really work out the hash offsets for us */
-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto out;
+	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 out:
 	drm_gem_object_unreference(obj);
 unlock:
@@ -164,23 +161,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return psb_gem_create(file, dev, args->size, &args->handle);
 }
 
-/**
- *	psb_gem_dumb_destroy	-	destroy a dumb buffer
- *	@file: client file
- *	@dev: our DRM device
- *	@handle: the object handle
- *
- *	Destroy a handle that was created via psb_gem_dumb_create, at least
- *	we hope it was created that way. i915 seems to assume the caller
- *	does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-			uint32_t handle)
-{
-	/* No special work needed, drop the reference and see what falls out */
-	return drm_gem_handle_delete(file, handle);
-}
-
 /**
  *	psb_gem_fault		-	pagefault handler for GEM objects
  *	@vma: the VMA of the GEM object
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
 	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
 	if (gtt == NULL)
 		return -ENOMEM;
-	if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
-		goto free_gtt;
+
+	drm_gem_private_object_init(dev, &gtt->gem, size);
 	if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
 		return 0;
-free_gtt:
+
+	drm_gem_object_release(&gtt->gem);
 	psb_gtt_free_range(dev, gtt);
 	return -ENOMEM;
 }
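
The gem.c hunks above are the part of this series that moves gma500 onto the combined VMA offset manager: the per-object map_list bookkeeping is gone, drm_gem_create_mmap_offset() is simply called unconditionally (the manager knows whether an offset already exists), and the fake mmap offset handed back to userspace now comes from drm_vma_node_offset_addr(). Put back together, the map-offset helper ends up shaped roughly like this; a sketch of the pattern in the hunk, not a verbatim copy of the file:

int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* The VMA manager tracks whether an offset already exists, so the
	   old "only create if no map_list entry" check is no longer needed. */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	/* Fake offset userspace passes to mmap() on the DRM fd. */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}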

+ 776 - 0
drivers/gpu/drm/gma500/gma_display.c

@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include <drm/drmP.h>
+#include "gma_display.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *l_entry;
+
+	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+			struct gma_encoder *gma_encoder =
+						gma_attached_encoder(l_entry);
+			if (gma_encoder->type == type)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+void gma_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50hz. */
+	mdelay(20);
+}
+
+int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+		      struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	int pipe = gma_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	unsigned long start, offset;
+	u32 dspcntr;
+	int ret = 0;
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		dev_err(dev->dev, "No FB bound\n");
+		goto gma_pipe_cleaner;
+	}
+
+	/* We are displaying this buffer, make sure it is actually loaded
+	   into the GTT */
+	ret = psb_gtt_pin(psbfb->gtt);
+	if (ret < 0)
+		goto gma_pipe_set_base_exit;
+	start = psbfb->gtt->offset;
+	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+	dspcntr = REG_READ(map->cntr);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		dev_err(dev->dev, "Unknown color depth\n");
+		ret = -EINVAL;
+		goto gma_pipe_set_base_exit;
+	}
+	REG_WRITE(map->cntr, dspcntr);
+
+	dev_dbg(dev->dev,
+		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+	/* FIXME: Investigate whether this really is the base for psb and why
+		  the linear offset is named base for the other chips. map->surf
+		  should be the base and map->linoff the offset for all chips */
+	if (IS_PSB(dev)) {
+		REG_WRITE(map->base, offset + start);
+		REG_READ(map->base);
+	} else {
+		REG_WRITE(map->base, offset);
+		REG_READ(map->base);
+		REG_WRITE(map->surf, start);
+		REG_READ(map->surf);
+	}
+
+gma_pipe_cleaner:
+	/* If there was a previous display we can now unpin it */
+	if (old_fb)
+		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+gma_pipe_set_base_exit:
+	gma_power_end(dev);
+	return ret;
+}
+
+/* Loads the palette/gamma unit for the CRTC with the prepared values */
+void gma_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+	int palreg = map->palette;
+	int i;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled)
+		return;
+
+	if (gma_power_begin(dev, false)) {
+		for (i = 0; i < 256; i++) {
+			REG_WRITE(palreg + 4 * i,
+				  ((gma_crtc->lut_r[i] +
+				  gma_crtc->lut_adj[i]) << 16) |
+				  ((gma_crtc->lut_g[i] +
+				  gma_crtc->lut_adj[i]) << 8) |
+				  (gma_crtc->lut_b[i] +
+				  gma_crtc->lut_adj[i]));
+		}
+		gma_power_end(dev);
+	} else {
+		for (i = 0; i < 256; i++) {
+			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
+			dev_priv->regs.pipe[0].palette[i] =
+				  ((gma_crtc->lut_r[i] +
+				  gma_crtc->lut_adj[i]) << 16) |
+				  ((gma_crtc->lut_g[i] +
+				  gma_crtc->lut_adj[i]) << 8) |
+				  (gma_crtc->lut_b[i] +
+				  gma_crtc->lut_adj[i]);
+		}
+
+	}
+}
+
+void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+			u32 start, u32 size)
+{
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int i;
+	int end = (start + size > 256) ? 256 : start + size;
+
+	for (i = start; i < end; i++) {
+		gma_crtc->lut_r[i] = red[i] >> 8;
+		gma_crtc->lut_g[i] = green[i] >> 8;
+		gma_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	gma_crtc_load_lut(crtc);
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int pipe = gma_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 temp;
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+
+	if (IS_CDV(dev))
+		dev_priv->ops->disable_sr(dev);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		if (gma_crtc->active)
+			break;
+
+		gma_crtc->active = true;
+
+		/* Enable the DPLL */
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+		}
+
+		/* Enable the plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(map->cntr,
+				  temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+		}
+
+		udelay(150);
+
+		/* Enable the pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) == 0)
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+		temp = REG_READ(map->status);
+		temp &= ~(0xFFFF);
+		temp |= PIPE_FIFO_UNDERRUN;
+		REG_WRITE(map->status, temp);
+		REG_READ(map->status);
+
+		gma_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+		break;
+	case DRM_MODE_DPMS_OFF:
+		if (!gma_crtc->active)
+			break;
+
+		gma_crtc->active = false;
+
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Turn off vblank interrupts */
+		drm_vblank_off(dev, pipe);
+
+		/* Wait for vblank for the disable to take effect */
+		gma_wait_for_vblank(dev);
+
+		/* Disable plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(map->cntr,
+				  temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
+		}
+
+		/* Disable pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
+		}
+
+		/* Wait for vblank for the disable to take effect. */
+		gma_wait_for_vblank(dev);
+
+		udelay(150);
+
+		/* Disable DPLL */
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) != 0) {
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+		}
+
+		/* Wait for the clocks to turn off. */
+		udelay(150);
+		break;
+	}
+
+	if (IS_CDV(dev))
+		dev_priv->ops->update_wm(dev, crtc);
+
+	/* Set FIFO watermarks */
+	REG_WRITE(DSPARB, 0x3F3E);
+}
+
+int gma_crtc_cursor_set(struct drm_crtc *crtc,
+			struct drm_file *file_priv,
+			uint32_t handle,
+			uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int pipe = gma_crtc->pipe;
+	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+	uint32_t temp;
+	size_t addr = 0;
+	struct gtt_range *gt;
+	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
+	struct drm_gem_object *obj;
+	void *tmp_dst, *tmp_src;
+	int ret = 0, i, cursor_pages;
+
+	/* If we didn't get a handle then turn the cursor off */
+	if (!handle) {
+		temp = CURSOR_MODE_DISABLE;
+
+		if (gma_power_begin(dev, false)) {
+			REG_WRITE(control, temp);
+			REG_WRITE(base, 0);
+			gma_power_end(dev);
+		}
+
+		/* Unpin the old GEM object */
+		if (gma_crtc->cursor_obj) {
+			gt = container_of(gma_crtc->cursor_obj,
+					  struct gtt_range, gem);
+			psb_gtt_unpin(gt);
+			drm_gem_object_unreference(gma_crtc->cursor_obj);
+			gma_crtc->cursor_obj = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (obj->size < width * height * 4) {
+		dev_dbg(dev->dev, "Buffer is too small\n");
+		ret = -ENOMEM;
+		goto unref_cursor;
+	}
+
+	gt = container_of(obj, struct gtt_range, gem);
+
+	/* Pin the memory into the GTT */
+	ret = psb_gtt_pin(gt);
+	if (ret) {
+		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+		goto unref_cursor;
+	}
+
+	if (dev_priv->ops->cursor_needs_phys) {
+		if (cursor_gt == NULL) {
+			dev_err(dev->dev, "No hardware cursor mem available");
+			ret = -ENOMEM;
+			goto unref_cursor;
+		}
+
+		/* Prevent overflow */
+		if (gt->npage > 4)
+			cursor_pages = 4;
+		else
+			cursor_pages = gt->npage;
+
+		/* Copy the cursor to cursor mem */
+		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+		for (i = 0; i < cursor_pages; i++) {
+			tmp_src = kmap(gt->pages[i]);
+			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+			kunmap(gt->pages[i]);
+			tmp_dst += PAGE_SIZE;
+		}
+
+		addr = gma_crtc->cursor_addr;
+	} else {
+		addr = gt->offset;
+		gma_crtc->cursor_addr = addr;
+	}
+
+	temp = 0;
+	/* set the pipe for the cursor */
+	temp |= (pipe << 28);
+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE(control, temp);
+		REG_WRITE(base, addr);
+		gma_power_end(dev);
+	}
+
+	/* unpin the old bo */
+	if (gma_crtc->cursor_obj) {
+		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(gma_crtc->cursor_obj);
+	}
+
+	gma_crtc->cursor_obj = obj;
+	return ret;
+
+unref_cursor:
+	drm_gem_object_unreference(obj);
+	return ret;
+}
+
+int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int pipe = gma_crtc->pipe;
+	uint32_t temp = 0;
+	uint32_t addr;
+
+	if (x < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+		x = -x;
+	}
+	if (y < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+		y = -y;
+	}
+
+	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+	addr = gma_crtc->cursor_addr;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+		gma_power_end(dev);
+	}
+	return 0;
+}
+
+bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+			 const struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+void gma_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+void gma_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void gma_crtc_disable(struct drm_crtc *crtc)
+{
+	struct gtt_range *gt;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	if (crtc->fb) {
+		gt = to_psb_fb(crtc->fb)->gtt;
+		psb_gtt_unpin(gt);
+	}
+}
+
+void gma_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+	kfree(gma_crtc->crtc_state);
+	drm_crtc_cleanup(crtc);
+	kfree(gma_crtc);
+}
+
+int gma_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev = set->crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!dev_priv->rpm_enabled)
+		return drm_crtc_helper_set_config(set);
+
+	pm_runtime_forbid(&dev->pdev->dev);
+	ret = drm_crtc_helper_set_config(set);
+	pm_runtime_allow(&dev->pdev->dev);
+
+	return ret;
+}
+
+/**
+ * Save HW states of given crtc
+ */
+void gma_crtc_save(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+	uint32_t palette_reg;
+	int i;
+
+	if (!crtc_state) {
+		dev_err(dev->dev, "No CRTC state found\n");
+		return;
+	}
+
+	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+	crtc_state->savePIPECONF = REG_READ(map->conf);
+	crtc_state->savePIPESRC = REG_READ(map->src);
+	crtc_state->saveFP0 = REG_READ(map->fp0);
+	crtc_state->saveFP1 = REG_READ(map->fp1);
+	crtc_state->saveDPLL = REG_READ(map->dpll);
+	crtc_state->saveHTOTAL = REG_READ(map->htotal);
+	crtc_state->saveHBLANK = REG_READ(map->hblank);
+	crtc_state->saveHSYNC = REG_READ(map->hsync);
+	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+	crtc_state->saveVBLANK = REG_READ(map->vblank);
+	crtc_state->saveVSYNC = REG_READ(map->vsync);
+	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+	/* NOTE: DSPSIZE DSPPOS only for psb */
+	crtc_state->saveDSPSIZE = REG_READ(map->size);
+	crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+	crtc_state->saveDSPBASE = REG_READ(map->base);
+
+	palette_reg = map->palette;
+	for (i = 0; i < 256; ++i)
+		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
+}
+
+/**
+ * Restore HW states of given crtc
+ */
+void gma_crtc_restore(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gma_crtc *gma_crtc =  to_gma_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+	uint32_t palette_reg;
+	int i;
+
+	if (!crtc_state) {
+		dev_err(dev->dev, "No crtc state\n");
+		return;
+	}
+
+	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+		REG_WRITE(map->dpll,
+			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
+		udelay(150);
+	}
+
+	REG_WRITE(map->fp0, crtc_state->saveFP0);
+	REG_READ(map->fp0);
+
+	REG_WRITE(map->fp1, crtc_state->saveFP1);
+	REG_READ(map->fp1);
+
+	REG_WRITE(map->dpll, crtc_state->saveDPLL);
+	REG_READ(map->dpll);
+	udelay(150);
+
+	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+	REG_WRITE(map->src, crtc_state->savePIPESRC);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+	REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+	gma_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+	gma_wait_for_vblank(dev);
+
+	palette_reg = map->palette;
+	for (i = 0; i < 256; ++i)
+		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
+}
+
+void gma_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void gma_encoder_commit(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of commit see psb_intel_lvds_commit */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void gma_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
+/* Currently there is only a 1:1 mapping of encoders and connectors */
+struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
+{
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+	return &gma_encoder->base;
+}
+
+void gma_connector_attach_encoder(struct gma_connector *connector,
+				  struct gma_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_mode_connector_attach_encoder(&connector->base,
+					  &encoder->base);
+}
+
+#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
+
+bool gma_pll_is_valid(struct drm_crtc *crtc,
+		      const struct gma_limit_t *limit,
+		      struct gma_clock_t *clock)
+{
+	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+		GMA_PLL_INVALID("p1 out of range");
+	if (clock->p < limit->p.min || limit->p.max < clock->p)
+		GMA_PLL_INVALID("p out of range");
+	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+		GMA_PLL_INVALID("m2 out of range");
+	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+		GMA_PLL_INVALID("m1 out of range");
+	/* On CDV m1 is always 0 */
+	if (clock->m1 <= clock->m2 && clock->m1 != 0)
+		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
+	if (clock->m < limit->m.min || limit->m.max < clock->m)
+		GMA_PLL_INVALID("m out of range");
+	if (clock->n < limit->n.min || limit->n.max < clock->n)
+		GMA_PLL_INVALID("n out of range");
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		GMA_PLL_INVALID("vco out of range");
+	/* XXX: We may need to be checking "Dot clock"
+	 * depending on the multiplier, connector, etc.,
+	 * rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		GMA_PLL_INVALID("dot out of range");
+
+	return true;
+}
+
+bool gma_find_best_pll(const struct gma_limit_t *limit,
+		       struct drm_crtc *crtc, int target, int refclk,
+		       struct gma_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	const struct gma_clock_funcs *clock_funcs =
+						to_gma_crtc(crtc)->clock_funcs;
+	struct gma_clock_t clock;
+	int err = target;
+
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+		/*
+		 * For LVDS, if the panel is on, just rely on its current
+		 * settings for dual-channel.  We haven't figured out how to
+		 * reliably set up different single/dual channel state, if we
+		 * even can.
+		 */
+		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	/* m1 is always 0 on CDV so the outermost loop will run just once */
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
+		      clock.m2 <= limit->m2.max; clock.m2++) {
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+				     clock.p1 <= limit->p1.max;
+				     clock.p1++) {
+					int this_err;
+
+					clock_funcs->clock(refclk, &clock);
+
+					if (!clock_funcs->pll_is_valid(crtc,
+								limit, &clock))
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return err != target;
+}
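
gma_find_best_pll() above is a brute-force divider search: it sweeps every m1/m2/n/p1 combination allowed by the chip's gma_limit_t, derives the resulting clocks through the per-chip clock() callback, rejects invalid combinations via pll_is_valid(), and keeps the candidate whose dot clock lands closest to the requested pixel clock. A minimal, self-contained sketch of the same idea in userspace C follows; the divider ranges and the dot = refclk * m / (n * p) model are illustrative only, not the gma500 register formulas.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical divider limits; the real code pulls these from a gma_limit_t. */
struct range { int min, max; };
static const struct range N = { 3, 8 }, M = { 70, 120 }, P = { 2, 16 };

/* Simplified model: dot = refclk * m / (n * p).  The driver instead derives
 * dot/vco/m/p from m1/m2/p1/p2 through the per-chip clock() callback. */
static int dot_clock(int refclk, int n, int m, int p)
{
	return refclk * m / (n * p);
}

int main(void)
{
	const int refclk = 96000;	/* kHz */
	const int target = 65000;	/* kHz, roughly 1024x768@60 */
	int best_err = target, bn = 0, bm = 0, bp = 0;

	/* Same shape as gma_find_best_pll(): exhaustive sweep, keep the
	 * combination with the smallest error against the target. */
	for (int n = N.min; n <= N.max; n++)
		for (int m = M.min; m <= M.max; m++)
			for (int p = P.min; p <= P.max; p++) {
				int err = abs(dot_clock(refclk, n, m, p) - target);

				if (err < best_err) {
					best_err = err;
					bn = n; bm = m; bp = p;
				}
			}

	printf("n=%d m=%d p=%d -> %d kHz (error %d kHz)\n",
	       bn, bm, bp, dot_clock(refclk, bn, bm, bp), best_err);
	return 0;
}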

+ 103 - 0
drivers/gpu/drm/gma500/gma_display.h

@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef _GMA_DISPLAY_H_
+#define _GMA_DISPLAY_H_
+
+#include <linux/pm_runtime.h>
+
+struct gma_clock_t {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int dot;
+	int vco;
+	int m;
+	int p;
+};
+
+struct gma_range_t {
+	int min, max;
+};
+
+struct gma_p2_t {
+	int dot_limit;
+	int p2_slow, p2_fast;
+};
+
+struct gma_limit_t {
+	struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
+	struct gma_p2_t p2;
+	bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
+			 int target, int refclk,
+			 struct gma_clock_t *best_clock);
+};
+
+struct gma_clock_funcs {
+	void (*clock)(int refclk, struct gma_clock_t *clock);
+	const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
+	bool (*pll_is_valid)(struct drm_crtc *crtc,
+			     const struct gma_limit_t *limit,
+			     struct gma_clock_t *clock);
+};
+
+/* Common pipe related functions */
+extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
+extern void gma_wait_for_vblank(struct drm_device *dev);
+extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+			     struct drm_framebuffer *old_fb);
+extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
+			       struct drm_file *file_priv,
+			       uint32_t handle,
+			       uint32_t width, uint32_t height);
+extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+extern void gma_crtc_load_lut(struct drm_crtc *crtc);
+extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			       u16 *blue, u32 start, u32 size);
+extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
+extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
+extern void gma_crtc_prepare(struct drm_crtc *crtc);
+extern void gma_crtc_commit(struct drm_crtc *crtc);
+extern void gma_crtc_disable(struct drm_crtc *crtc);
+extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_set_config(struct drm_mode_set *set);
+
+extern void gma_crtc_save(struct drm_crtc *crtc);
+extern void gma_crtc_restore(struct drm_crtc *crtc);
+
+extern void gma_encoder_prepare(struct drm_encoder *encoder);
+extern void gma_encoder_commit(struct drm_encoder *encoder);
+extern void gma_encoder_destroy(struct drm_encoder *encoder);
+
+/* Common clock related functions */
+extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
+extern void gma_clock(int refclk, struct gma_clock_t *clock);
+extern bool gma_pll_is_valid(struct drm_crtc *crtc,
+			     const struct gma_limit_t *limit,
+			     struct gma_clock_t *clock);
+extern bool gma_find_best_pll(const struct gma_limit_t *limit,
+			      struct drm_crtc *crtc, int target, int refclk,
+			      struct gma_clock_t *best_clock);
+#endif
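
The gma_clock_funcs table above is the hook that keeps the shared code chip-agnostic: each chip family supplies its own clock()/limit()/pll_is_valid() callbacks and gma_find_best_pll() dispatches through to_gma_crtc(crtc)->clock_funcs (psb_device.c further down wires up psb_clock_funcs this way). A hedged sketch of what such a table could look like, assuming the declarations from gma_display.h; every "example_*" name and all of the numbers are made up for illustration.

/* Sketch only: placeholder ranges and arithmetic, not a real chip's values. */
static const struct gma_limit_t example_limits = {
	.dot = { 20000, 400000 }, .vco = { 1400000, 2800000 },
	.n = { 3, 8 }, .m = { 60, 120 }, .m1 = { 10, 20 }, .m2 = { 5, 9 },
	.p = { 5, 80 }, .p1 = { 1, 8 },
	.p2 = { .dot_limit = 165000, .p2_slow = 14, .p2_fast = 7 },
	.find_pll = gma_find_best_pll,	/* the shared search implemented above */
};

static const struct gma_limit_t *example_limit(struct drm_crtc *crtc, int refclk)
{
	return &example_limits;		/* a real chip picks a table per refclk/output */
}

static void example_clock(int refclk, struct gma_clock_t *clock)
{
	/* Derive the composite values that gma_pll_is_valid() range-checks;
	 * illustrative arithmetic, not a real chip's DPLL formula. */
	clock->m   = 5 * clock->m1 + clock->m2;
	clock->p   = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static const struct gma_clock_funcs example_clock_funcs = {
	.clock        = example_clock,
	.limit        = example_limit,
	.pll_is_valid = gma_pll_is_valid,	/* shared validator exported above */
};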

+ 6 - 32
drivers/gpu/drm/gma500/gtt.c

@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
  */
 static int psb_gtt_attach_pages(struct gtt_range *gt)
 {
-	struct inode *inode;
-	struct address_space *mapping;
-	int i;
-	struct page *p;
-	int pages = gt->gem.size / PAGE_SIZE;
+	struct page **pages;
 
 	WARN_ON(gt->pages);
 
-	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(gt->gem.filp);
-	mapping = inode->i_mapping;
+	pages = drm_gem_get_pages(&gt->gem, 0);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
 
-	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
-	if (gt->pages == NULL)
-		return -ENOMEM;
-	gt->npage = pages;
+	gt->pages = pages;
 
-	for (i = 0; i < pages; i++) {
-		p = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(p))
-			goto err;
-		gt->pages[i] = p;
-	}
 	return 0;
-
-err:
-	while (i--)
-		page_cache_release(gt->pages[i]);
-	kfree(gt->pages);
-	gt->pages = NULL;
-	return PTR_ERR(p);
 }
 
 /**
@@ -240,13 +220,7 @@ err:
  */
 static void psb_gtt_detach_pages(struct gtt_range *gt)
 {
-	int i;
-	for (i = 0; i < gt->npage; i++) {
-		/* FIXME: do we need to force dirty */
-		set_page_dirty(gt->pages[i]);
-		page_cache_release(gt->pages[i]);
-	}
-	kfree(gt->pages);
+	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
 	gt->pages = NULL;
 }
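
The two hunks above replace the driver's hand-rolled shmem walk with the core GEM page helpers: drm_gem_get_pages() allocates the page array and reads every page of the object's shmem mapping, unwinding by itself on partial failure, while drm_gem_put_pages() optionally marks each page dirty and accessed before dropping the references and freeing the array. The resulting attach/detach pair, restated here with comments describing what the helpers take over, is simply:

static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	/* Allocates gt->pages[] and reads every page of the GEM object's shmem
	 * mapping; on a partial failure the helper releases what it already
	 * pinned and returns an ERR_PTR, so no unwind code is needed here. */
	struct page **pages = drm_gem_get_pages(&gt->gem, 0);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->pages = pages;
	return 0;
}

static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	/* dirty = true, accessed = false: mark the pages dirty (the GPU may
	 * have written them) before the references are dropped and the page
	 * array is freed. */
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}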
 

+ 7 - 8
drivers/gpu/drm/gma500/mdfld_dsi_output.c

@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 	struct drm_encoder *encoder = connector->encoder;
 
 	if (!strcmp(property->name, "scaling mode") && encoder) {
-		struct psb_intel_crtc *psb_crtc =
-					to_psb_intel_crtc(encoder->crtc);
+		struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
 		bool centerechange;
 		uint64_t val;
 
-		if (!psb_crtc)
+		if (!gma_crtc)
 			goto set_prop_error;
 
 		switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 		centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
 			(value == DRM_MODE_SCALE_NO_SCALE);
 
-		if (psb_crtc->saved_mode.hdisplay != 0 &&
-		    psb_crtc->saved_mode.vdisplay != 0) {
+		if (gma_crtc->saved_mode.hdisplay != 0 &&
+		    gma_crtc->saved_mode.vdisplay != 0) {
 			if (centerechange) {
 				if (!drm_crtc_helper_set_mode(encoder->crtc,
-						&psb_crtc->saved_mode,
+						&gma_crtc->saved_mode,
 						encoder->crtc->x,
 						encoder->crtc->y,
 						encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 				struct drm_encoder_helper_funcs *funcs =
 						encoder->helper_private;
 				funcs->mode_set(encoder,
-					&psb_crtc->saved_mode,
-					&psb_crtc->saved_adjusted_mode);
+					&gma_crtc->saved_mode,
+					&gma_crtc->saved_adjusted_mode);
 			}
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {

+ 8 - 8
drivers/gpu/drm/gma500/mdfld_dsi_output.h

@@ -227,7 +227,7 @@ enum {
 #define DSI_DPI_DISABLE_BTA					BIT(3)
 
 struct mdfld_dsi_connector {
-	struct psb_intel_connector base;
+	struct gma_connector base;
 
 	int pipe;
 	void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
 };
 
 struct mdfld_dsi_encoder {
-	struct psb_intel_encoder base;
+	struct gma_encoder base;
 	void *private;
 };
 
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
 static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
 		struct drm_connector *connector)
 {
-	struct psb_intel_connector *psb_connector;
+	struct gma_connector *gma_connector;
 
-	psb_connector = to_psb_intel_connector(connector);
+	gma_connector = to_gma_connector(connector);
 
-	return container_of(psb_connector, struct mdfld_dsi_connector, base);
+	return container_of(gma_connector, struct mdfld_dsi_connector, base);
 }
 
 static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
 		struct drm_encoder *encoder)
 {
-	struct psb_intel_encoder *psb_encoder;
+	struct gma_encoder *gma_encoder;
 
-	psb_encoder = to_psb_intel_encoder(encoder);
+	gma_encoder = to_gma_encoder(encoder);
 
-	return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
+	return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
 }
 
 static inline struct mdfld_dsi_config *

+ 23 - 42
drivers/gpu/drm/gma500/mdfld_intel_display.c

@@ -23,7 +23,7 @@
 
 #include <drm/drmP.h>
 #include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
 #include "framebuffer.h"
 #include "mdfld_output.h"
 #include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
 	}
 
 	/* FIXME JLIU7_PO */
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 	return;
 
 	/* Wait for the pipe disable to take effect. */
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
 	}
 
 	/* FIXME JLIU7_PO */
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 	return;
 
 	/* Wait for the pipe enable to take effect. */
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
 	}
 }
 
-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
-{
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 /**
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
-	int pipe = psb_intel_crtc->pipe;
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
 	u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-	int pipe = psb_intel_crtc->pipe;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 pipeconf = dev_priv->pipeconf[pipe];
 	u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 			}
 		}
 
-		psb_intel_crtc_load_lut(crtc);
+		gma_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
 		   if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
-	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
 		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
 			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
 		else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
 			 (dev_priv->core_freq == 100 ||
 				dev_priv->core_freq == 200))
 			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
-	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
 		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
 			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
 		else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	int pipe = psb_intel_crtc->pipe;
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
 	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dpll = 0, fp = 0;
 	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct psb_intel_encoder *psb_intel_encoder = NULL;
+	struct gma_encoder *gma_encoder = NULL;
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	if (!gma_power_begin(dev, true))
 		return 0;
 
-	memcpy(&psb_intel_crtc->saved_mode, mode,
+	memcpy(&gma_crtc->saved_mode, mode,
 					sizeof(struct drm_display_mode));
-	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+	memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
 					sizeof(struct drm_display_mode));
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 
-		psb_intel_encoder = psb_intel_attached_encoder(connector);
+		gma_encoder = gma_attached_encoder(connector);
 
-		switch (psb_intel_encoder->type) {
+		switch (gma_encoder->type) {
 		case INTEL_OUTPUT_MIPI:
 			is_mipi = true;
 			break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 
 	REG_WRITE(map->pos, 0);
 
-	if (psb_intel_encoder)
+	if (gma_encoder)
 		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 
 	/* Wait for the pipe enable to take effect. */
 	REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 mrst_crtc_mode_set_exit:
 
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
 
 const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
 	.dpms = mdfld_crtc_dpms,
-	.mode_fixup = psb_intel_crtc_mode_fixup,
+	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = mdfld_crtc_mode_set,
 	.mode_set_base = mdfld__intel_pipe_set_base,
-	.prepare = psb_intel_crtc_prepare,
-	.commit = psb_intel_crtc_commit,
+	.prepare = gma_crtc_prepare,
+	.commit = gma_crtc_commit,
 };
 

+ 22 - 41
drivers/gpu/drm/gma500/oaktrail_crtc.c

@@ -23,7 +23,7 @@
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
 #include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
 #include "power.h"
 
 struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
 		switch (dev_priv->core_freq) {
 		case 100:
 			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-	int pipe = psb_intel_crtc->pipe;
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 			REG_WRITE(map->base, REG_READ(map->base));
 		}
 
-		psb_intel_crtc_load_lut(crtc);
+		gma_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
 		   if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 			REG_READ(map->conf);
 		}
 		/* Wait for the pipe disable to take effect. */
-		psb_intel_wait_for_vblank(dev);
+		gma_wait_for_vblank(dev);
 
 		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	int pipe = psb_intel_crtc->pipe;
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
 	struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	bool is_lvds = false;
 	bool is_mipi = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct psb_intel_encoder *psb_intel_encoder = NULL;
+	struct gma_encoder *gma_encoder = NULL;
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_connector *connector;
 
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	if (!gma_power_begin(dev, true))
 		return 0;
 
-	memcpy(&psb_intel_crtc->saved_mode,
+	memcpy(&gma_crtc->saved_mode,
 		mode,
 		sizeof(struct drm_display_mode));
-	memcpy(&psb_intel_crtc->saved_adjusted_mode,
+	memcpy(&gma_crtc->saved_adjusted_mode,
 		adjusted_mode,
 		sizeof(struct drm_display_mode));
 
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 		if (!connector->encoder || connector->encoder->crtc != crtc)
 			continue;
 
-		psb_intel_encoder = psb_intel_attached_encoder(connector);
+		gma_encoder = gma_attached_encoder(connector);
 
-		switch (psb_intel_encoder->type) {
+		switch (gma_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 		  ((mode->crtc_hdisplay - 1) << 16) |
 		  (mode->crtc_vdisplay - 1));
 
-	if (psb_intel_encoder)
+	if (gma_encoder)
 		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 
 	REG_WRITE(map->conf, pipeconf);
 	REG_READ(map->conf);
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 	REG_WRITE(map->cntr, dspcntr);
-	psb_intel_wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 oaktrail_crtc_mode_set_exit:
 	gma_power_end(dev);
 	return 0;
 }
 
-static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
-	int pipe = psb_intel_crtc->pipe;
+	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
 
@@ -563,24 +556,12 @@ pipe_set_base_exit:
 	return ret;
 }
 
-static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void oaktrail_crtc_commit(struct drm_crtc *crtc)
-{
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
 const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
 	.dpms = oaktrail_crtc_dpms,
-	.mode_fixup = oaktrail_crtc_mode_fixup,
+	.mode_fixup = gma_crtc_mode_fixup,
 	.mode_set = oaktrail_crtc_mode_set,
 	.mode_set_base = oaktrail_pipe_set_base,
-	.prepare = oaktrail_crtc_prepare,
-	.commit = oaktrail_crtc_commit,
+	.prepare = gma_crtc_prepare,
+	.commit = gma_crtc_commit,
 };
 

+ 18 - 25
drivers/gpu/drm/gma500/oaktrail_hdmi.c

@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
 	HDMI_READ(HDMI_HCR);
 }
 
-static void wait_for_vblank(struct drm_device *dev)
-{
-	/* Wait for 20ms, i.e. one cycle at 50hz. */
-	mdelay(20);
-}
-
 static unsigned int htotal_calculate(struct drm_display_mode *mode)
 {
 	u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
 
 	REG_WRITE(PCH_PIPEBCONF, pipeconf);
 	REG_READ(PCH_PIPEBCONF);
-	wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 	REG_WRITE(dspcntr_reg, dspcntr);
-	wait_for_vblank(dev);
+	gma_wait_for_vblank(dev);
 
 	gma_power_end(dev);
 
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
 			REG_READ(PCH_PIPEBCONF);
 		}
 
-		wait_for_vblank(dev);
+		gma_wait_for_vblank(dev);
 
 		/* Enable plane */
 		temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
 			REG_READ(DSPBSURF);
 		}
 
-		psb_intel_crtc_load_lut(crtc);
+		gma_crtc_load_lut(crtc);
 	}
 
 	/* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
 static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
 	.dpms = oaktrail_hdmi_dpms,
 	.mode_fixup = oaktrail_hdmi_mode_fixup,
-	.prepare = psb_intel_encoder_prepare,
+	.prepare = gma_encoder_prepare,
 	.mode_set = oaktrail_hdmi_mode_set,
-	.commit = psb_intel_encoder_commit,
+	.commit = gma_encoder_commit,
 };
 
 static const struct drm_connector_helper_funcs
 					oaktrail_hdmi_connector_helper_funcs = {
 	.get_modes = oaktrail_hdmi_get_modes,
 	.mode_valid = oaktrail_hdmi_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
 void oaktrail_hdmi_init(struct drm_device *dev,
 					struct psb_intel_mode_device *mode_dev)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
-	if (!psb_intel_encoder)
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+	if (!gma_encoder)
 		return;
 
-	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
-	if (!psb_intel_connector)
+	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+	if (!gma_connector)
 		goto failed_connector;
 
-	connector = &psb_intel_connector->base;
-	encoder = &psb_intel_encoder->base;
+	connector = &gma_connector->base;
+	encoder = &gma_encoder->base;
 	drm_connector_init(dev, connector,
 			   &oaktrail_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
 			 &oaktrail_hdmi_enc_funcs,
 			 DRM_MODE_ENCODER_TMDS);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
 
-	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+	gma_encoder->type = INTEL_OUTPUT_HDMI;
 	drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
 	drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
 
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
 	return;
 
 failed_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {

+ 22 - 26
drivers/gpu/drm/gma500/oaktrail_lvds.c

@@ -43,7 +43,7 @@
  * Sets the power state for the panel.
  */
 static void oaktrail_lvds_set_power(struct drm_device *dev,
-				struct psb_intel_encoder *psb_intel_encoder,
+				struct gma_encoder *gma_encoder,
 				bool on)
 {
 	u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
 static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-						to_psb_intel_encoder(encoder);
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
 
 	if (mode == DRM_MODE_DPMS_ON)
-		oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+		oaktrail_lvds_set_power(dev, gma_encoder, true);
 	else
-		oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+		oaktrail_lvds_set_power(dev, gma_encoder, false);
 
 	/* XXX: We never power down the LVDS pairs. */
 }
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder =
-						to_psb_intel_encoder(encoder);
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
 
 	if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
 	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
 	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
 					  BACKLIGHT_DUTY_CYCLE_MASK);
-	oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+	oaktrail_lvds_set_power(dev, gma_encoder, false);
 	gma_power_end(dev);
 }
 
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder =
-						to_psb_intel_encoder(encoder);
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
 
 	if (mode_dev->backlight_duty_cycle == 0)
 		mode_dev->backlight_duty_cycle =
 					oaktrail_lvds_get_max_backlight(dev);
-	oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+	oaktrail_lvds_set_power(dev, gma_encoder, true);
 }
 
 static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
 void oaktrail_lvds_init(struct drm_device *dev,
 		    struct psb_intel_mode_device *mode_dev)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
 	struct i2c_adapter *i2c_adap;
 	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
 
-	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
-	if (!psb_intel_encoder)
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+	if (!gma_encoder)
 		return;
 
-	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
-	if (!psb_intel_connector)
+	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+	if (!gma_connector)
 		goto failed_connector;
 
-	connector = &psb_intel_connector->base;
-	encoder = &psb_intel_encoder->base;
+	connector = &gma_connector->base;
+	encoder = &gma_encoder->base;
 	dev_priv->is_lvds_on = true;
 	drm_connector_init(dev, connector,
 			   &psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
 	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
-	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
+	gma_encoder->type = INTEL_OUTPUT_LVDS;
 
 	drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
 	drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
 
 failed_find:
 	dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
-	if (psb_intel_encoder->ddc_bus)
-		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+	if (gma_encoder->ddc_bus)
+		psb_intel_i2c_destroy(gma_encoder->ddc_bus);
 
 /* failed_ddc: */
 
 	drm_encoder_cleanup(encoder);
 	drm_connector_cleanup(connector);
-	kfree(psb_intel_connector);
+	kfree(gma_connector);
 failed_connector:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
 

+ 2 - 1
drivers/gpu/drm/gma500/psb_device.c

@@ -25,7 +25,7 @@
 #include "psb_reg.h"
 #include "psb_intel_reg.h"
 #include "intel_bios.h"
-
+#include "psb_device.h"
 
 static int psb_output_init(struct drm_device *dev)
 {
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
 
 	.crtc_helper = &psb_intel_helper_funcs,
 	.crtc_funcs = &psb_intel_crtc_funcs,
+	.clock_funcs = &psb_clock_funcs,
 
 	.output_init = psb_output_init,
 

+ 6 - 7
drivers/gpu/drm/gma500/psb_intel_display.h → drivers/gpu/drm/gma500/psb_device.h

@@ -1,4 +1,6 @@
-/* copyright (c) 2008, Intel Corporation
+/*
+ * Copyright © 2013 Patrik Jakobsson
+ * Copyright © 2011 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, write to the Free Software Foundation, Inc.,
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
  */
 
-#ifndef _INTEL_DISPLAY_H_
-#define _INTEL_DISPLAY_H_
+#ifndef _PSB_DEVICE_H_
+#define _PSB_DEVICE_H_
 
-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+extern const struct gma_clock_funcs psb_clock_funcs;
 
 #endif

+ 10 - 11
drivers/gpu/drm/gma500/psb_drv.c

@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
 		      DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 	unsigned long irqflags;
 	int ret = -ENOMEM;
 	struct drm_connector *connector;
-	struct psb_intel_encoder *psb_intel_encoder;
+	struct gma_encoder *gma_encoder;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 	/* Only add backlight support if we have LVDS output */
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    head) {
-		psb_intel_encoder = psb_intel_attached_encoder(connector);
+		gma_encoder = gma_attached_encoder(connector);
 
-		switch (psb_intel_encoder->type) {
+		switch (gma_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 		case INTEL_OUTPUT_MIPI:
 			ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	struct psb_intel_crtc *psb_intel_crtc;
+	struct gma_crtc *gma_crtc;
 	int i = 0;
 	int32_t obj_id;
 
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 
 	connector = obj_to_connector(obj);
 	crtc = connector->encoder->crtc;
-	psb_intel_crtc = to_psb_intel_crtc(crtc);
+	gma_crtc = to_gma_crtc(crtc);
 
 	for (i = 0; i < 256; i++)
-		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+		gma_crtc->lut_adj[i] = lut_arg->lut[i];
 
-	psb_intel_crtc_load_lut(crtc);
+	gma_crtc_load_lut(crtc);
 
 	return 0;
 }
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
 	.unlocked_ioctl = psb_unlocked_ioctl,
 	.mmap = drm_gem_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 	.read = drm_read,
 };
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
-			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+			   DRIVER_MODESET | DRIVER_GEM ,
 	.load = psb_driver_load,
 	.unload = psb_driver_unload,
 
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
 	.gem_vm_ops = &psb_gem_vm_ops,
 	.dumb_create = psb_gem_dumb_create,
 	.dumb_map_offset = psb_gem_dumb_map_gtt,
-	.dumb_destroy = psb_gem_dumb_destroy,
+	.dumb_destroy = drm_gem_dumb_destroy,
 	.fops = &psb_gem_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,

+ 5 - 2
drivers/gpu/drm/gma500/psb_drv.h

@@ -27,6 +27,7 @@
 #include <drm/gma_drm.h>
 #include "psb_reg.h"
 #include "psb_intel_drv.h"
+#include "gma_display.h"
 #include "intel_bios.h"
 #include "gtt.h"
 #include "power.h"
@@ -46,6 +47,7 @@ enum {
 #define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
 #define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
 #define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
 
 /*
  * Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
 	/* Sub functions */
 	struct drm_crtc_helper_funcs const *crtc_helper;
 	struct drm_crtc_funcs const *crtc_funcs;
+	const struct gma_clock_funcs *clock_funcs;
 
 	/* Setup hooks */
 	int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
 	int (*restore_regs)(struct drm_device *dev);
 	int (*power_up)(struct drm_device *dev);
 	int (*power_down)(struct drm_device *dev);
+	void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
+	void (*disable_sr)(struct drm_device *dev);
 
 	void (*lvds_bl_power)(struct drm_device *dev, bool on);
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
 			struct drm_file *file);
 extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-			uint32_t handle);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 			uint32_t handle, uint64_t *offset);
 extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
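
The new IS_CDV() macro follows the same pattern as the existing IS_PSB/IS_MRST/IS_MFLD checks: mask off the low bits of the PCI device ID so that a single comparison matches a whole family of IDs (0xfff0 groups sixteen consecutive IDs, 0x0be0 through 0x0bef, covering the Cedarview range). A standalone check of the arithmetic:

#include <stdio.h>

/* Same masking trick as IS_CDV() above: 0xfff0 keeps everything but the low
 * nibble, so IDs 0x0be0..0x0bef all compare equal to 0x0be0. */
static int is_cdv_id(unsigned int pci_device)
{
	return (pci_device & 0xfff0) == 0x0be0;
}

int main(void)
{
	unsigned int ids[] = { 0x0be0, 0x0be3, 0x0bef, 0x0bf0, 0x8108 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("0x%04x -> %s\n", ids[i], is_cdv_id(ids[i]) ? "CDV" : "not CDV");
	return 0;
}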

File diff suppressed because it is too large
+ 86 - 824
drivers/gpu/drm/gma500/psb_intel_display.c


+ 19 - 25
drivers/gpu/drm/gma500/psb_intel_drv.h

@@ -24,6 +24,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <linux/gpio.h>
+#include "gma_display.h"
 
 /*
  * Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
 	u8 slave_addr;
 };
 
-struct psb_intel_encoder {
+struct gma_encoder {
 	struct drm_encoder base;
 	int type;
 	bool needs_tv_clock;
-	void (*hot_plug)(struct psb_intel_encoder *);
+	void (*hot_plug)(struct gma_encoder *);
 	int crtc_mask;
 	int clone_mask;
 	u32 ddi_select;	/* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
 	struct psb_intel_i2c_chan *ddc_bus;
 };
 
-struct psb_intel_connector {
+struct gma_connector {
 	struct drm_connector base;
-	struct psb_intel_encoder *encoder;
+	struct gma_encoder *encoder;
 };
 
 struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
 	uint32_t savePalette[256];
 };
 
-struct psb_intel_crtc {
+struct gma_crtc {
 	struct drm_crtc base;
 	int pipe;
 	int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
 
 	/* Saved Crtc HW states */
 	struct psb_intel_crtc_state *crtc_state;
+
+	const struct gma_clock_funcs *clock_funcs;
 };
 
-#define to_psb_intel_crtc(x)	\
-		container_of(x, struct psb_intel_crtc, base)
-#define to_psb_intel_connector(x) \
-		container_of(x, struct psb_intel_connector, base)
-#define to_psb_intel_encoder(x)	\
-		container_of(x, struct psb_intel_encoder, base)
+#define to_gma_crtc(x)	\
+		container_of(x, struct gma_crtc, base)
+#define to_gma_connector(x) \
+		container_of(x, struct gma_connector, base)
+#define to_gma_encoder(x)	\
+		container_of(x, struct gma_encoder, base)
 #define to_psb_intel_framebuffer(x)	\
 		container_of(x, struct psb_intel_framebuffer, base)
 
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
 extern void mid_dsi_init(struct drm_device *dev,
 		    struct psb_intel_mode_device *mode_dev, int dsi_num);
 
-extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
-extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
-extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
+extern void gma_connector_attach_encoder(struct gma_connector *connector,
+					 struct gma_encoder *encoder);
 
-static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+static inline struct gma_encoder *gma_attached_encoder(
 						struct drm_connector *connector)
 {
-	return to_psb_intel_connector(connector)->encoder;
+	return to_gma_connector(connector)->encoder;
 }
 
-extern void psb_intel_connector_attach_encoder(
-					struct psb_intel_connector *connector,
-					struct psb_intel_encoder *encoder);
-
-extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
-					      *connector);
-
 extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
-extern void psb_intel_wait_for_vblank(struct drm_device *dev);
 extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
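
The renamed to_gma_crtc/to_gma_connector/to_gma_encoder macros above are plain container_of() downcasts: the DRM core passes around a pointer to the embedded base structure, and the driver recovers its wrapper by subtracting the member offset. A standalone illustration of the pattern, using stand-in types rather than the real DRM structs:

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the DRM base object and the driver wrapper. */
struct drm_crtc_stub { int id; };

struct gma_crtc_stub {
	struct drm_crtc_stub base;	/* embedded base, like struct gma_crtc */
	int pipe;
};

/* Same shape as the kernel's container_of(): member pointer back to wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define to_gma_crtc_stub(x) container_of(x, struct gma_crtc_stub, base)

int main(void)
{
	struct gma_crtc_stub crtc = { .base = { .id = 7 }, .pipe = 1 };
	struct drm_crtc_stub *base = &crtc.base;	/* what the core hands back */

	printf("pipe recovered via downcast: %d\n", to_gma_crtc_stub(base)->pipe);
	return 0;
}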

+ 32 - 43
drivers/gpu/drm/gma500/psb_intel_lvds.c

@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_psb_private *dev_priv =
 		(struct drm_psb_private *)dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 	struct psb_intel_lvds_priv *lvds_priv =
-		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+		(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
 
 	lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
 	lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	u32 pp_status;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 	struct psb_intel_lvds_priv *lvds_priv =
-		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+		(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
 
 	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
 			lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
 	struct drm_psb_private *dev_priv = connector->dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
 	struct drm_display_mode *fixed_mode =
 					dev_priv->mode_dev.panel_fixed_mode;
 
-	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+	if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
 		fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
 
 	/* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
-	struct psb_intel_crtc *psb_intel_crtc =
-				to_psb_intel_crtc(encoder->crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
 	struct drm_encoder *tmp_encoder;
 	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
-	struct psb_intel_encoder *psb_intel_encoder =
-						to_psb_intel_encoder(encoder);
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
 
-	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+	if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
 		panel_fixed_mode = mode_dev->panel_fixed_mode2;
 
 	/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
-	if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+	if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
 		printk(KERN_ERR "Can't support LVDS on pipe A\n");
 		return false;
 	}
-	if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+	if (IS_MRST(dev) && gma_crtc->pipe != 0) {
 		printk(KERN_ERR "Must use PIPE A\n");
 		return false;
 	}
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
 	int ret = 0;
 
 	if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
  */
 void psb_intel_lvds_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
 
 	if (lvds_priv->ddc_bus)
 		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
 		return -1;
 
 	if (!strcmp(property->name, "scaling mode")) {
-		struct psb_intel_crtc *crtc =
-					to_psb_intel_crtc(encoder->crtc);
+		struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
 		uint64_t curval;
 
 		if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
 				psb_intel_lvds_connector_helper_funcs = {
 	.get_modes = psb_intel_lvds_get_modes,
 	.mode_valid = psb_intel_lvds_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
 void psb_intel_lvds_init(struct drm_device *dev,
 			 struct psb_intel_mode_device *mode_dev)
 {
-	struct psb_intel_encoder *psb_intel_encoder;
-	struct psb_intel_connector *psb_intel_connector;
+	struct gma_encoder *gma_encoder;
+	struct gma_connector *gma_connector;
 	struct psb_intel_lvds_priv *lvds_priv;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	u32 lvds;
 	int pipe;
 
-	psb_intel_encoder =
-			kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
-	if (!psb_intel_encoder) {
-		dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+	gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+	if (!gma_encoder) {
+		dev_err(dev->dev, "gma_encoder allocation error\n");
 		return;
 	}
 
-	psb_intel_connector =
-		kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
-	if (!psb_intel_connector) {
-		dev_err(dev->dev, "psb_intel_connector allocation error\n");
+	gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+	if (!gma_connector) {
+		dev_err(dev->dev, "gma_connector allocation error\n");
 		goto failed_encoder;
 	}
 
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
 		goto failed_connector;
 	}
 
-	psb_intel_encoder->dev_priv = lvds_priv;
+	gma_encoder->dev_priv = lvds_priv;
 
-	connector = &psb_intel_connector->base;
-	encoder = &psb_intel_encoder->base;
+	connector = &gma_connector->base;
+	encoder = &gma_encoder->base;
 	drm_connector_init(dev, connector,
 			   &psb_intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
 			 &psb_intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 
-	psb_intel_connector_attach_encoder(psb_intel_connector,
-					   psb_intel_encoder);
-	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+	gma_connector_attach_encoder(gma_connector, gma_encoder);
+	gma_encoder->type = INTEL_OUTPUT_LVDS;
 
 	drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
 	drm_encoder_cleanup(encoder);
 	drm_connector_cleanup(connector);
 failed_connector:
-	kfree(psb_intel_connector);
+	kfree(gma_connector);
 failed_encoder:
-	kfree(psb_intel_encoder);
+	kfree(gma_encoder);
 }
 

+ 25 - 28
drivers/gpu/drm/gma500/psb_intel_sdvo.c

@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
 #define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
 
 struct psb_intel_sdvo {
-	struct psb_intel_encoder base;
+	struct gma_encoder base;
 
 	struct i2c_adapter *i2c;
 	u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
 };
 
 struct psb_intel_sdvo_connector {
-	struct psb_intel_connector base;
+	struct gma_connector base;
 
 	/* Mark the type of connector */
 	uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
 
 static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
 {
-	return container_of(psb_intel_attached_encoder(connector),
+	return container_of(gma_attached_encoder(connector),
 			    struct psb_intel_sdvo, base);
 }
 
 static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
 {
-	return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+	return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
 }
 
 static bool
@@ -988,7 +988,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
 	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
 	u32 sdvox;
 	struct psb_intel_sdvo_in_out_map in_out;
@@ -1071,7 +1071,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
 	}
 	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
 
-	if (psb_intel_crtc->pipe == 1)
+	if (gma_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
 	if (psb_intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
@@ -1122,7 +1122,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 		if ((temp & SDVO_ENABLE) == 0)
 			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
 		for (i = 0; i < 2; i++)
-			psb_intel_wait_for_vblank(dev);
+			gma_wait_for_vblank(dev);
 
 		status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
 		/* Warn if the device reported failure to sync.
@@ -1837,10 +1837,8 @@ done:
 static void psb_intel_sdvo_save(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct psb_intel_encoder *psb_intel_encoder =
-					psb_intel_attached_encoder(connector);
-	struct psb_intel_sdvo *sdvo =
-				to_psb_intel_sdvo(&psb_intel_encoder->base);
+	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
 
 	sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
 }
@@ -1848,8 +1846,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
 static void psb_intel_sdvo_restore(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder =
-				&psb_intel_attached_encoder(connector)->base;
+	struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
 	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
 	struct drm_crtc *crtc = encoder->crtc;
 
@@ -1865,9 +1862,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
 static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
 	.dpms = psb_intel_sdvo_dpms,
 	.mode_fixup = psb_intel_sdvo_mode_fixup,
-	.prepare = psb_intel_encoder_prepare,
+	.prepare = gma_encoder_prepare,
 	.mode_set = psb_intel_sdvo_mode_set,
-	.commit = psb_intel_encoder_commit,
+	.commit = gma_encoder_commit,
 };
 
 static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1883,7 +1880,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
 	.get_modes = psb_intel_sdvo_get_modes,
 	.mode_valid = psb_intel_sdvo_mode_valid,
-	.best_encoder = psb_intel_best_encoder,
+	.best_encoder = gma_best_encoder,
 };
 
 static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1895,7 +1892,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 				 psb_intel_sdvo->sdvo_lvds_fixed_mode);
 
 	i2c_del_adapter(&psb_intel_sdvo->ddc);
-	psb_intel_encoder_destroy(encoder);
+	gma_encoder_destroy(encoder);
 }
 
 static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2056,7 +2053,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
 	connector->base.base.doublescan_allowed = 0;
 	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
 
-	psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+	gma_connector_attach_encoder(&connector->base, &encoder->base);
 	drm_sysfs_connector_add(&connector->base.base);
 }
 
@@ -2076,7 +2073,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
 {
 	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
 	struct drm_connector *connector;
-	struct psb_intel_connector *intel_connector;
+	struct gma_connector *intel_connector;
 	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
 
 	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2116,7 +2113,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
 {
 	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
 	struct drm_connector *connector;
-	struct psb_intel_connector *intel_connector;
+	struct gma_connector *intel_connector;
 	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
 
 	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2155,7 +2152,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
 {
 	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
 	struct drm_connector *connector;
-	struct psb_intel_connector *intel_connector;
+	struct gma_connector *intel_connector;
 	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
 
 	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2189,7 +2186,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
 {
 	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
 	struct drm_connector *connector;
-	struct psb_intel_connector *intel_connector;
+	struct gma_connector *intel_connector;
 	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
 
 	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2541,7 +2538,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
 bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct psb_intel_encoder *psb_intel_encoder;
+	struct gma_encoder *gma_encoder;
 	struct psb_intel_sdvo *psb_intel_sdvo;
 	int i;
 
@@ -2558,9 +2555,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	}
 
 	/* encoder type will be decided later */
-	psb_intel_encoder = &psb_intel_sdvo->base;
-	psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
-	drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+	gma_encoder = &psb_intel_sdvo->base;
+	gma_encoder->type = INTEL_OUTPUT_SDVO;
+	drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
 
 	/* Read the regs to test if we can talk to the device */
 	for (i = 0; i < 0x40; i++) {
@@ -2578,7 +2575,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	else
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
 
-	drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+	drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
 
 	/* In default case sdvo lvds is false */
 	if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2621,7 +2618,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 	return true;
 
 err:
-	drm_encoder_cleanup(&psb_intel_encoder->base);
+	drm_encoder_cleanup(&gma_encoder->base);
 	i2c_del_adapter(&psb_intel_sdvo->ddc);
 	kfree(psb_intel_sdvo);
 

+ 409 - 76
drivers/gpu/drm/i2c/tda998x_drv.c

@@ -23,7 +23,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder_slave.h>
 #include <drm/drm_edid.h>
-
+#include <drm/i2c/tda998x.h>
 
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
@@ -32,6 +32,11 @@ struct tda998x_priv {
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
+	bool is_hdmi_sink;
+	u8 vip_cntrl_0;
+	u8 vip_cntrl_1;
+	u8 vip_cntrl_2;
+	struct tda998x_encoder_params params;
 };
 
 #define to_tda998x_priv(x)  ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
 # define I2C_MASTER_DIS_MM        (1 << 0)
 # define I2C_MASTER_DIS_FILT      (1 << 1)
 # define I2C_MASTER_APP_STRT_LAT  (1 << 2)
+#define REG_FEAT_POWERDOWN        REG(0x00, 0x0e)     /* read/write */
+# define FEAT_POWERDOWN_SPDIF     (1 << 3)
 #define REG_INT_FLAGS_0           REG(0x00, 0x0f)     /* read/write */
 #define REG_INT_FLAGS_1           REG(0x00, 0x10)     /* read/write */
 #define REG_INT_FLAGS_2           REG(0x00, 0x11)     /* read/write */
 # define INT_FLAGS_2_EDID_BLK_RD  (1 << 1)
+#define REG_ENA_ACLK              REG(0x00, 0x16)     /* read/write */
 #define REG_ENA_VP_0              REG(0x00, 0x18)     /* read/write */
 #define REG_ENA_VP_1              REG(0x00, 0x19)     /* read/write */
 #define REG_ENA_VP_2              REG(0x00, 0x1a)     /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
 #define REG_VIP_CNTRL_5           REG(0x00, 0x25)     /* write */
 # define VIP_CNTRL_5_CKCASE       (1 << 0)
 # define VIP_CNTRL_5_SP_CNT(x)    (((x) & 3) << 1)
+#define REG_MUX_AP                REG(0x00, 0x26)     /* read/write */
+#define REG_MUX_VP_VIP_OUT        REG(0x00, 0x27)     /* read/write */
 #define REG_MAT_CONTRL            REG(0x00, 0x80)     /* write */
 # define MAT_CONTRL_MAT_SC(x)     (((x) & 3) << 0)
 # define MAT_CONTRL_MAT_BP        (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
 #define REG_VS_LINE_END_1_LSB     REG(0x00, 0xae)     /* write */
 #define REG_VS_PIX_END_1_MSB      REG(0x00, 0xaf)     /* write */
 #define REG_VS_PIX_END_1_LSB      REG(0x00, 0xb0)     /* write */
+#define REG_VS_LINE_STRT_2_MSB    REG(0x00, 0xb1)     /* write */
+#define REG_VS_LINE_STRT_2_LSB    REG(0x00, 0xb2)     /* write */
 #define REG_VS_PIX_STRT_2_MSB     REG(0x00, 0xb3)     /* write */
 #define REG_VS_PIX_STRT_2_LSB     REG(0x00, 0xb4)     /* write */
+#define REG_VS_LINE_END_2_MSB     REG(0x00, 0xb5)     /* write */
+#define REG_VS_LINE_END_2_LSB     REG(0x00, 0xb6)     /* write */
 #define REG_VS_PIX_END_2_MSB      REG(0x00, 0xb7)     /* write */
 #define REG_VS_PIX_END_2_LSB      REG(0x00, 0xb8)     /* write */
 #define REG_HS_PIX_START_MSB      REG(0x00, 0xb9)     /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
 #define REG_VWIN_START_1_LSB      REG(0x00, 0xbe)     /* write */
 #define REG_VWIN_END_1_MSB        REG(0x00, 0xbf)     /* write */
 #define REG_VWIN_END_1_LSB        REG(0x00, 0xc0)     /* write */
+#define REG_VWIN_START_2_MSB      REG(0x00, 0xc1)     /* write */
+#define REG_VWIN_START_2_LSB      REG(0x00, 0xc2)     /* write */
+#define REG_VWIN_END_2_MSB        REG(0x00, 0xc3)     /* write */
+#define REG_VWIN_END_2_LSB        REG(0x00, 0xc4)     /* write */
 #define REG_DE_START_MSB          REG(0x00, 0xc5)     /* write */
 #define REG_DE_START_LSB          REG(0x00, 0xc6)     /* write */
 #define REG_DE_STOP_MSB           REG(0x00, 0xc7)     /* write */
 #define REG_DE_STOP_LSB           REG(0x00, 0xc8)     /* write */
 #define REG_TBG_CNTRL_0           REG(0x00, 0xca)     /* write */
+# define TBG_CNTRL_0_TOP_TGL      (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL      (1 << 1)
+# define TBG_CNTRL_0_DE_EXT       (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT      (1 << 3)
 # define TBG_CNTRL_0_FRAME_DIS    (1 << 5)
 # define TBG_CNTRL_0_SYNC_MTHD    (1 << 6)
 # define TBG_CNTRL_0_SYNC_ONCE    (1 << 7)
 #define REG_TBG_CNTRL_1           REG(0x00, 0xcb)     /* write */
-# define TBG_CNTRL_1_VH_TGL_0     (1 << 0)
-# define TBG_CNTRL_1_VH_TGL_1     (1 << 1)
-# define TBG_CNTRL_1_VH_TGL_2     (1 << 2)
-# define TBG_CNTRL_1_VHX_EXT_DE   (1 << 3)
-# define TBG_CNTRL_1_VHX_EXT_HS   (1 << 4)
-# define TBG_CNTRL_1_VHX_EXT_VS   (1 << 5)
+# define TBG_CNTRL_1_H_TGL        (1 << 0)
+# define TBG_CNTRL_1_V_TGL        (1 << 1)
+# define TBG_CNTRL_1_TGL_EN       (1 << 2)
+# define TBG_CNTRL_1_X_EXT        (1 << 3)
+# define TBG_CNTRL_1_H_EXT        (1 << 4)
+# define TBG_CNTRL_1_V_EXT        (1 << 5)
 # define TBG_CNTRL_1_DWIN_DIS     (1 << 6)
 #define REG_ENABLE_SPACE          REG(0x00, 0xd6)     /* write */
 #define REG_HVF_CNTRL_0           REG(0x00, 0xe4)     /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
 # define HVF_CNTRL_1_PAD(x)       (((x) & 3) << 4)
 # define HVF_CNTRL_1_SEMI_PLANAR  (1 << 6)
 #define REG_RPT_CNTRL             REG(0x00, 0xf0)     /* write */
+#define REG_I2S_FORMAT            REG(0x00, 0xfc)     /* read/write */
+# define I2S_FORMAT(x)            (((x) & 3) << 0)
+#define REG_AIP_CLKSEL            REG(0x00, 0xfd)     /* write */
+# define AIP_CLKSEL_FS(x)         (((x) & 3) << 0)
+# define AIP_CLKSEL_CLK_POL(x)    (((x) & 1) << 2)
+# define AIP_CLKSEL_AIP(x)        (((x) & 7) << 3)
 
 
 /* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
 #define REG_PLL_SCGR1             REG(0x02, 0x09)     /* read/write */
 #define REG_PLL_SCGR2             REG(0x02, 0x0a)     /* read/write */
 #define REG_AUDIO_DIV             REG(0x02, 0x0e)     /* read/write */
+# define AUDIO_DIV_SERCLK_1       0
+# define AUDIO_DIV_SERCLK_2       1
+# define AUDIO_DIV_SERCLK_4       2
+# define AUDIO_DIV_SERCLK_8       3
+# define AUDIO_DIV_SERCLK_16      4
+# define AUDIO_DIV_SERCLK_32      5
 #define REG_SEL_CLK               REG(0x02, 0x11)     /* read/write */
 # define SEL_CLK_SEL_CLK1         (1 << 0)
 # define SEL_CLK_SEL_VRF_CLK(x)   (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
 
 
 /* Page 10h: information frames and packets */
+#define REG_IF1_HB0               REG(0x10, 0x20)     /* read/write */
+#define REG_IF2_HB0               REG(0x10, 0x40)     /* read/write */
+#define REG_IF3_HB0               REG(0x10, 0x60)     /* read/write */
+#define REG_IF4_HB0               REG(0x10, 0x80)     /* read/write */
+#define REG_IF5_HB0               REG(0x10, 0xa0)     /* read/write */
 
 
 /* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
 # define AIP_CNTRL_0_LAYOUT       (1 << 2)
 # define AIP_CNTRL_0_ACR_MAN      (1 << 5)
 # define AIP_CNTRL_0_RST_CTS      (1 << 6)
+#define REG_CA_I2S                REG(0x11, 0x01)     /* read/write */
+# define CA_I2S_CA_I2S(x)         (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT        (1 << 6)
+#define REG_LATENCY_RD            REG(0x11, 0x04)     /* read/write */
+#define REG_ACR_CTS_0             REG(0x11, 0x05)     /* read/write */
+#define REG_ACR_CTS_1             REG(0x11, 0x06)     /* read/write */
+#define REG_ACR_CTS_2             REG(0x11, 0x07)     /* read/write */
+#define REG_ACR_N_0               REG(0x11, 0x08)     /* read/write */
+#define REG_ACR_N_1               REG(0x11, 0x09)     /* read/write */
+#define REG_ACR_N_2               REG(0x11, 0x0a)     /* read/write */
+#define REG_CTS_N                 REG(0x11, 0x0c)     /* read/write */
+# define CTS_N_K(x)               (((x) & 7) << 0)
+# define CTS_N_M(x)               (((x) & 3) << 4)
 #define REG_ENC_CNTRL             REG(0x11, 0x0d)     /* read/write */
 # define ENC_CNTRL_RST_ENC        (1 << 0)
 # define ENC_CNTRL_RST_SEL        (1 << 1)
 # define ENC_CNTRL_CTL_CODE(x)    (((x) & 3) << 2)
+#define REG_DIP_FLAGS             REG(0x11, 0x0e)     /* read/write */
+# define DIP_FLAGS_ACR            (1 << 0)
+# define DIP_FLAGS_GC             (1 << 1)
+#define REG_DIP_IF_FLAGS          REG(0x11, 0x0f)     /* read/write */
+# define DIP_IF_FLAGS_IF1         (1 << 1)
+# define DIP_IF_FLAGS_IF2         (1 << 2)
+# define DIP_IF_FLAGS_IF3         (1 << 3)
+# define DIP_IF_FLAGS_IF4         (1 << 4)
+# define DIP_IF_FLAGS_IF5         (1 << 5)
+#define REG_CH_STAT_B(x)          REG(0x11, 0x14 + (x)) /* read/write */
 
 
 /* Page 12h: HDCP and OTP */
 #define REG_TX3                   REG(0x12, 0x9a)     /* read/write */
+#define REG_TX4                   REG(0x12, 0x9b)     /* read/write */
+# define TX4_PD_RAM               (1 << 1)
 #define REG_TX33                  REG(0x12, 0xb8)     /* read/write */
 # define TX33_HDMI                (1 << 1)
 
@@ -338,6 +402,23 @@ fail:
 	return ret;
 }
 
+static void
+reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	uint8_t buf[cnt+1];
+	int ret;
+
+	buf[0] = REG2ADDR(reg);
+	memcpy(&buf[1], p, cnt);
+
+	set_page(encoder, reg);
+
+	ret = i2c_master_send(client, buf, cnt + 1);
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
 static uint8_t
 reg_read(struct drm_encoder *encoder, uint16_t reg)
 {
@@ -406,13 +487,176 @@ tda998x_reset(struct drm_encoder *encoder)
 	reg_write(encoder, REG_SERIALIZER,   0x00);
 	reg_write(encoder, REG_BUFFER_OUT,   0x00);
 	reg_write(encoder, REG_PLL_SCG1,     0x00);
-	reg_write(encoder, REG_AUDIO_DIV,    0x03);
+	reg_write(encoder, REG_AUDIO_DIV,    AUDIO_DIV_SERCLK_8);
 	reg_write(encoder, REG_SEL_CLK,      SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
 	reg_write(encoder, REG_PLL_SCGN1,    0xfa);
 	reg_write(encoder, REG_PLL_SCGN2,    0x00);
 	reg_write(encoder, REG_PLL_SCGR1,    0x5b);
 	reg_write(encoder, REG_PLL_SCGR2,    0x00);
 	reg_write(encoder, REG_PLL_SCG2,     0x10);
+
+	/* Write the default value to the MUX register */
+	reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+	uint8_t sum = 0;
+
+	while (bytes--)
+		sum += *buf++;
+	return (255 - sum) + 1;
+}
+
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
+
+static void
+tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+		 uint8_t *buf, size_t size)
+{
+	buf[PB(0)] = tda998x_cksum(buf, size);
+
+	reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
+	reg_write_range(encoder, addr, buf, size);
+	reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+{
+	uint8_t buf[PB(5) + 1];
+
+	buf[HB(0)] = 0x84;
+	buf[HB(1)] = 0x01;
+	buf[HB(2)] = 10;
+	buf[PB(0)] = 0;
+	buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+	buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+	buf[PB(4)] = p->audio_frame[4];
+	buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+	tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+			 sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	uint8_t buf[PB(13) + 1];
+
+	memset(buf, 0, sizeof(buf));
+	buf[HB(0)] = 0x82;
+	buf[HB(1)] = 0x02;
+	buf[HB(2)] = 13;
+	buf[PB(4)] = drm_match_cea_mode(mode);
+
+	tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+			 sizeof(buf));
+}
+
+static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+{
+	if (on) {
+		reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+		reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+		reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+	} else {
+		reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+	}
+}
+
+static void
+tda998x_configure_audio(struct drm_encoder *encoder,
+		struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+	uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+	uint32_t n;
+
+	/* Enable audio ports */
+	reg_write(encoder, REG_ENA_AP, p->audio_cfg);
+	reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+
+	/* Set audio input source */
+	switch (p->audio_format) {
+	case AFMT_SPDIF:
+		reg_write(encoder, REG_MUX_AP, 0x40);
+		clksel_aip = AIP_CLKSEL_AIP(0);
+		/* FS64SPDIF */
+		clksel_fs = AIP_CLKSEL_FS(2);
+		cts_n = CTS_N_M(3) | CTS_N_K(3);
+		ca_i2s = 0;
+		break;
+
+	case AFMT_I2S:
+		reg_write(encoder, REG_MUX_AP, 0x64);
+		clksel_aip = AIP_CLKSEL_AIP(1);
+		/* ACLK */
+		clksel_fs = AIP_CLKSEL_FS(0);
+		cts_n = CTS_N_M(3) | CTS_N_K(3);
+		ca_i2s = CA_I2S_CA_I2S(0);
+		break;
+
+	default:
+		BUG();
+		return;
+	}
+
+	reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
+	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
+
+	/* Enable automatic CTS generation */
+	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
+	reg_write(encoder, REG_CTS_N, cts_n);
+
+	/*
+	 * Audio input somehow depends on HDMI line rate which is
+	 * related to pixclk. Testing showed that modes with pixclk
+	 * >100MHz need a larger divider while <40MHz need the default.
+	 * There is no detailed info in the datasheet, so we just
+	 * assume anything above 100MHz requires the larger divider.
+	 */
+	if (mode->clock > 100000)
+		adiv = AUDIO_DIV_SERCLK_16;
+	else
+		adiv = AUDIO_DIV_SERCLK_8;
+	reg_write(encoder, REG_AUDIO_DIV, adiv);
+
+	/*
+	 * This is the approximate value of N, which happens to be
+	 * the recommended value for non-coherent clocks.
+	 */
+	n = 128 * p->audio_sample_rate / 1000;
+
+	/* Write the CTS and N values */
+	buf[0] = 0x44;
+	buf[1] = 0x42;
+	buf[2] = 0x01;
+	buf[3] = n;
+	buf[4] = n >> 8;
+	buf[5] = n >> 16;
+	reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+
+	/* Set CTS clock reference */
+	reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+	/* Reset CTS generator */
+	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+	reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+	/* Write the channel status */
+	buf[0] = 0x04;
+	buf[1] = 0x00;
+	buf[2] = 0x00;
+	buf[3] = 0xf1;
+	reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+
+	tda998x_audio_mute(encoder, true);
+	mdelay(20);
+	tda998x_audio_mute(encoder, false);
+
+	/* Write the audio information packet */
+	tda998x_write_aif(encoder, p);
 }
 
 /* DRM encoder functions */
@@ -420,6 +664,23 @@ tda998x_reset(struct drm_encoder *encoder)
 static void
 tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
 {
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+	struct tda998x_encoder_params *p = params;
+
+	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+			    (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+			    VIP_CNTRL_0_SWAP_B(p->swap_b) |
+			    (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+			    (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+			    VIP_CNTRL_1_SWAP_D(p->swap_d) |
+			    (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+			    (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+			    VIP_CNTRL_2_SWAP_F(p->swap_f) |
+			    (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+	priv->params = *p;
 }
 
 static void
@@ -436,18 +697,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
-		/* enable audio and video ports */
-		reg_write(encoder, REG_ENA_AP, 0xff);
+		/* enable video ports, audio will be enabled later */
 		reg_write(encoder, REG_ENA_VP_0, 0xff);
 		reg_write(encoder, REG_ENA_VP_1, 0xff);
 		reg_write(encoder, REG_ENA_VP_2, 0xff);
 		/* set muxing after enabling ports: */
-		reg_write(encoder, REG_VIP_CNTRL_0,
-				VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
-		reg_write(encoder, REG_VIP_CNTRL_1,
-				VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
-		reg_write(encoder, REG_VIP_CNTRL_2,
-				VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+		reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+		reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+		reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		/* disable audio and video ports */
@@ -494,43 +751,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 			struct drm_display_mode *adjusted_mode)
 {
 	struct tda998x_priv *priv = to_tda998x_priv(encoder);
-	uint16_t hs_start, hs_end, line_start, line_end;
-	uint16_t vwin_start, vwin_end, de_start, de_end;
-	uint16_t ref_pix, ref_line, pix_start2;
+	uint16_t ref_pix, ref_line, n_pix, n_line;
+	uint16_t hs_pix_s, hs_pix_e;
+	uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+	uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+	uint16_t vwin1_line_s, vwin1_line_e;
+	uint16_t vwin2_line_s, vwin2_line_e;
+	uint16_t de_pix_s, de_pix_e;
 	uint8_t reg, div, rep;
 
-	hs_start   = mode->hsync_start - mode->hdisplay;
-	hs_end     = mode->hsync_end - mode->hdisplay;
-	line_start = 1;
-	line_end   = 1 + mode->vsync_end - mode->vsync_start;
-	vwin_start = mode->vtotal - mode->vsync_start;
-	vwin_end   = vwin_start + mode->vdisplay;
-	de_start   = mode->htotal - mode->hdisplay;
-	de_end     = mode->htotal;
-
-	pix_start2 = 0;
-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		pix_start2 = (mode->htotal / 2) + hs_start;
-
-	/* TODO how is this value calculated?  It is 2 for all common
-	 * formats in the tables in out of tree nxp driver (assuming
-	 * I've properly deciphered their byzantine table system)
+	/*
+	 * Internally TDA998x is using ITU-R BT.656 style sync but
+	 * we get VESA style sync. TDA998x is using a reference pixel
+	 * relative to ITU to sync to the input frame and for output
+	 * sync generation. Currently, we are using reference detection
+	 * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
+	 * which is the position of rising VS with coincident rising HS.
+	 *
+	 * Now there are some issues to take care of:
+	 * - HDMI data islands require sync-before-active
+	 * - TDA998x register values must be > 0 to be enabled
+	 * - REFLINE needs an additional offset of +1
+	 * - REFPIX needs an additional offset of +1 for UYUV and +3 for RGB
+	 *
+	 * So we add +1 to all horizontal and vertical register values,
+	 * plus an additional +3 for REFPIX as we are using RGB input only.
 	 */
-	ref_line = 2;
-
-	/* this might changes for other color formats from the CRTC: */
-	ref_pix = 3 + hs_start;
+	n_pix        = mode->htotal;
+	n_line       = mode->vtotal;
+
+	hs_pix_e     = mode->hsync_end - mode->hdisplay;
+	hs_pix_s     = mode->hsync_start - mode->hdisplay;
+	de_pix_e     = mode->htotal;
+	de_pix_s     = mode->htotal - mode->hdisplay;
+	ref_pix      = 3 + hs_pix_s;
+
+	/*
+	 * Attached LCD controllers may generate broken sync. Allow
+	 * those to adjust the position of the rising VS edge by adding
+	 * HSKEW to ref_pix.
+	 */
+	if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+		ref_pix += adjusted_mode->hskew;
+
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+		ref_line     = 1 + mode->vsync_start - mode->vdisplay;
+		vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+		vwin1_line_e = vwin1_line_s + mode->vdisplay;
+		vs1_pix_s    = vs1_pix_e = hs_pix_s;
+		vs1_line_s   = mode->vsync_start - mode->vdisplay;
+		vs1_line_e   = vs1_line_s +
+			       mode->vsync_end - mode->vsync_start;
+		vwin2_line_s = vwin2_line_e = 0;
+		vs2_pix_s    = vs2_pix_e  = 0;
+		vs2_line_s   = vs2_line_e = 0;
+	} else {
+		ref_line     = 1 + (mode->vsync_start - mode->vdisplay)/2;
+		vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+		vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+		vs1_pix_s    = vs1_pix_e = hs_pix_s;
+		vs1_line_s   = (mode->vsync_start - mode->vdisplay)/2;
+		vs1_line_e   = vs1_line_s +
+			       (mode->vsync_end - mode->vsync_start)/2;
+		vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+		vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+		vs2_pix_s    = vs2_pix_e = hs_pix_s + mode->htotal/2;
+		vs2_line_s   = vs1_line_s + mode->vtotal/2;
+		vs2_line_e   = vs2_line_s +
+			       (mode->vsync_end - mode->vsync_start)/2;
+	}
 
 	div = 148500 / mode->clock;
 
-	DBG("clock=%d, div=%u", mode->clock, div);
-	DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
-			hs_start, hs_end, line_start, line_end);
-	DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
-			vwin_start, vwin_end, de_start, de_end);
-	DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
-			ref_line, ref_pix, pix_start2);
-
 	/* mute the audio FIFO: */
 	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
 
@@ -561,9 +853,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 	reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
 			PLL_SERIAL_2_SRL_PR(rep));
 
-	reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
-	reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
-
 	/* set color matrix bypass flag: */
 	reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
 
@@ -572,47 +861,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 
 	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
 
+	/*
+	 * Sync on rising HSYNC/VSYNC
+	 */
 	reg_write(encoder, REG_VIP_CNTRL_3, 0);
 	reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+
+	/*
+	 * TDA19988 requires high-active sync at input stage,
+	 * so invert low-active sync provided by master encoder here
+	 */
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
 		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
 
+	/*
+	 * Always generate sync polarity relative to input sync and
+	 * revert input stage toggled sync at output stage
+	 */
+	reg = TBG_CNTRL_1_TGL_EN;
 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+		reg |= TBG_CNTRL_1_H_TGL;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		reg |= TBG_CNTRL_1_V_TGL;
+	reg_write(encoder, REG_TBG_CNTRL_1, reg);
 
 	reg_write(encoder, REG_VIDFORMAT, 0x00);
-	reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
-	reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
-	reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
-	reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
-	reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
-	reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
-	reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
-	reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
-	reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
-	reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
-	reg_write16(encoder, REG_DE_START_MSB, de_start);
-	reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+	reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+	reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+	reg_write16(encoder, REG_NPIX_MSB, n_pix);
+	reg_write16(encoder, REG_NLINE_MSB, n_line);
+	reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+	reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+	reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
+	reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+	reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+	reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+	reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
+	reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+	reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
+	reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
+	reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
+	reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
+	reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
+	reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
+	reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
+	reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
 
 	if (priv->rev == TDA19988) {
 		/* let incoming pixels fill the active space (if any) */
 		reg_write(encoder, REG_ENABLE_SPACE, 0x01);
 	}
 
-	reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
-	reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-
-	reg = TBG_CNTRL_1_VHX_EXT_DE |
-			TBG_CNTRL_1_VHX_EXT_HS |
-			TBG_CNTRL_1_VHX_EXT_VS |
-			TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
-			TBG_CNTRL_1_VH_TGL_2;
-	if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
-		reg |= TBG_CNTRL_1_VH_TGL_0;
-	reg_set(encoder, REG_TBG_CNTRL_1, reg);
-
 	/* must be last register set: */
 	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+
+	/* Only set up the info frames if the sink is HDMI */
+	if (priv->is_hdmi_sink) {
+		/* We need to turn HDMI HDCP stuff on to get audio through */
+		reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+		reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+		reg_set(encoder, REG_TX33, TX33_HDMI);
+
+		tda998x_write_avi(encoder, adjusted_mode);
+
+		if (priv->params.audio_cfg)
+			tda998x_configure_audio(encoder, adjusted_mode,
+						&priv->params);
+	}
 }
 
 static enum drm_connector_status
@@ -673,6 +990,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
 static uint8_t *
 do_get_edid(struct drm_encoder *encoder)
 {
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	int j = 0, valid_extensions = 0;
 	uint8_t *block, *new;
 	bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +998,9 @@ do_get_edid(struct drm_encoder *encoder)
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
 		return NULL;
 
+	if (priv->rev == TDA19988)
+		reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+
 	/* base block fetch */
 	if (read_edid_block(encoder, block, 0))
 		goto fail;
@@ -689,7 +1010,7 @@ do_get_edid(struct drm_encoder *encoder)
 
 	/* if there's no extensions, we're done */
 	if (block[0x7e] == 0)
-		return block;
+		goto done;
 
 	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
 	if (!new)
@@ -716,9 +1037,15 @@ do_get_edid(struct drm_encoder *encoder)
 		block = new;
 	}
 
+done:
+	if (priv->rev == TDA19988)
+		reg_set(encoder, REG_TX4, TX4_PD_RAM);
+
 	return block;
 
 fail:
+	if (priv->rev == TDA19988)
+		reg_set(encoder, REG_TX4, TX4_PD_RAM);
 	dev_warn(encoder->dev->dev, "failed to read EDID\n");
 	kfree(block);
 	return NULL;
@@ -728,12 +1055,14 @@ static int
 tda998x_encoder_get_modes(struct drm_encoder *encoder,
 			 struct drm_connector *connector)
 {
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	struct edid *edid = (struct edid *)do_get_edid(encoder);
 	int n = 0;
 
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		n = drm_add_edid_modes(connector, edid);
+		priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
 		kfree(edid);
 	}
 
@@ -807,6 +1136,10 @@ tda998x_encoder_init(struct i2c_client *client,
 	if (!priv)
 		return -ENOMEM;
 
+	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+
 	priv->current_page = 0;
 	priv->cec = i2c_new_dummy(client->adapter, 0x34);
 	priv->dpms = DRM_MODE_DPMS_OFF;

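The new tda998x audio path above comes down to two small pieces of arithmetic: the ACR N value programmed through REG_ACR_N_* (n = 128 * sample_rate / 1000) and the infoframe checksum produced by tda998x_cksum(), which makes the header and payload bytes sum to zero modulo 256. The following is a standalone user-space sketch of just that arithmetic, not driver code; the 48 kHz sample rate, the 17-byte AVI-sized buffer and the VIC value are illustrative assumptions.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Same formula as tda998x_configure_audio(): N = 128 * fs / 1000, fs in Hz. */
static uint32_t acr_n(uint32_t sample_rate_hz)
{
	return 128 * sample_rate_hz / 1000;
}

/* Same rule as tda998x_cksum(): the checksum byte brings the sum of all
 * infoframe bytes to 0 modulo 256 ((255 - sum) + 1 == 256 - sum in 8 bits). */
static uint8_t if_cksum(const uint8_t *buf, size_t bytes)
{
	uint8_t sum = 0;

	while (bytes--)
		sum += *buf++;
	return 256 - sum;
}

int main(void)
{
	/* AVI-style buffer as in tda998x_write_avi(): header bytes 0x82, 0x02,
	 * 13, followed by PB(0)..PB(13) at indices 3..16. */
	uint8_t avi[17] = { 0x82, 0x02, 13 };
	uint8_t total = 0;
	size_t i;

	/* 48 kHz: 128 * 48000 / 1000 = 6144, the usual HDMI N for 48 kHz. */
	printf("N(48 kHz) = %u\n", (unsigned)acr_n(48000));

	avi[3 + 4] = 16;                     /* PB(4): a sample CEA/VIC code */
	avi[3] = if_cksum(avi, sizeof(avi)); /* PB(0) carries the checksum */

	for (i = 0; i < sizeof(avi); i++)
		total += avi[i];
	printf("checksum = 0x%02x, byte total mod 256 = %u\n",
	       (unsigned)avi[3], (unsigned)total);
	return 0;
}

Compiled on its own, this prints N = 6144 and a byte total of 0, which is what the driver's formula yields for a 48 kHz stream and what a correctly checksummed infoframe must sum to.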
+ 1 - 2
drivers/gpu/drm/i810/i810_dma.c

@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = i810_mmap_buffers,
-	.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
 	return 0;
 }
 
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),

+ 1 - 2
drivers/gpu/drm/i810/i810_drv.c

@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
-	.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = drm_compat_ioctl,
 #endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
 
 static struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
 	    DRIVER_HAVE_DMA,
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.load = i810_driver_load,

+ 1 - 1
drivers/gpu/drm/i810/i810_drv.h

@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
 extern int i810_max_ioctl;
 
 #define I810_BASE(reg)		((unsigned long) \

+ 2 - 0
drivers/gpu/drm/i915/Makefile

@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_debugfs.o \
+	  i915_gpu_error.o \
           i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_sprite.o \
 	  intel_opregion.o \
 	  intel_sideband.o \
+	  intel_uncore.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \

+ 1 - 1
drivers/gpu/drm/i915/dvo_ch7xxx.c

@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
 		idf |= CH7xxx_IDF_HSP;
 
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-		idf |= CH7xxx_IDF_HSP;
+		idf |= CH7xxx_IDF_VSP;
 
 	ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
 }

File diff suppressed because it is too large
+ 183 - 479
drivers/gpu/drm/i915/i915_debugfs.c


+ 79 - 62
drivers/gpu/drm/i915/i915_dma.c

@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_LLC:
 		value = HAS_LLC(dev);
 		break;
+	case I915_PARAM_HAS_WT:
+		value = HAS_WT(dev);
+		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
 		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
 		break;
@@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_register_dsm_handler();
 
-	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
 	if (ret)
 		goto cleanup_vga_client;
 
@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		dev_priv->mm.suspended = 0;
+	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
-	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	drm_kms_helper_poll_init(dev);
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
 	return 0;
 
 cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1440,22 +1438,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
-/**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1497,15 +1479,31 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
-	spin_lock_init(&dev_priv->gt_lock);
 	spin_lock_init(&dev_priv->backlight.lock);
+	spin_lock_init(&dev_priv->uncore.lock);
+	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
 	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+
 	i915_dump_device_info(dev_priv);
 
+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1531,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_bridge;
 	}
 
-	intel_early_sanitize_regs(dev);
+	intel_uncore_early_sanitize(dev);
+
+	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
+		 * NB: We can't write IDICR yet because we do not have gt funcs
+		 * set up */
+		dev_priv->ellc_size = 128;
+		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	}
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
@@ -1567,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_rmmap;
 	}
 
-	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-						 aperture_size);
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
@@ -1595,8 +1603,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	intel_irq_init(dev);
 	intel_pm_init(dev);
-	intel_gt_sanitize(dev);
-	intel_gt_init(dev);
+	intel_uncore_sanitize(dev);
+	intel_uncore_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1631,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			goto out_gem_unload;
 	}
 
-	/* Start out suspended */
-	dev_priv->mm.suspended = 1;
-
 	if (HAS_POWER_WELL(dev))
 		i915_init_power_well(dev);
 
@@ -1643,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_ERROR("failed to init modeset\n");
 			goto out_gem_unload;
 		}
+	} else {
+		/* Start out suspended in ums mode. */
+		dev_priv->ums.mm_suspended = 1;
 	}
 
 	i915_setup_sysfs(dev);
@@ -1669,9 +1677,9 @@ out_gem_unload:
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
-	dev_priv->gtt.gtt_remove(dev);
+	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
@@ -1688,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)
 
 	intel_gpu_ips_teardown();
 
-	if (HAS_POWER_WELL(dev))
+	if (HAS_POWER_WELL(dev)) {
+		/* The i915.ko module is still not prepared to be loaded when
+		 * the power well is not enabled, so just enable it in case
+		 * we're going to unload/reload. */
+		intel_set_power_well(dev, true);
 		i915_remove_power_well(dev);
+	}
 
 	i915_teardown_sysfs(dev);
 
@@ -1707,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
 	acpi_video_unregister();
 
@@ -1735,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 
+	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
@@ -1756,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev)
 			i915_free_hws(dev);
 	}
 
-	drm_mm_takedown(&dev_priv->mm.gtt_space);
+	list_del(&dev_priv->gtt.base.global_link);
+	WARN_ON(!list_empty(&dev_priv->vm_list));
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	if (dev_priv->regs != NULL)
 		pci_iounmap(dev->pdev, dev_priv->regs);
 
@@ -1766,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev)
 	destroy_workqueue(dev_priv->wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
-	dev_priv->gtt.gtt_remove(dev);
+	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);
@@ -1842,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(file_priv);
 }
 
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -1862,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

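The DRM_RENDER_ALLOW bits added to i915_ioctls above mark which entries stay reachable when a client opens a render node instead of the primary node. The fragment below is a simplified, self-contained illustration of that kind of flag gate; the xdrm_* names, flag values and table contents are assumptions for illustration, not the DRM core's actual implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define XDRM_AUTH          (1u << 0)  /* stand-in for DRM_AUTH */
#define XDRM_RENDER_ALLOW  (1u << 5)  /* stand-in for DRM_RENDER_ALLOW */

struct xdrm_ioctl_desc {
	const char *name;
	unsigned int flags;
};

/* Two entries mirroring the pattern in the table above: execbuffer2 gains
 * the render flag, the legacy pin ioctl does not. */
static const struct xdrm_ioctl_desc table[] = {
	{ "I915_GEM_EXECBUFFER2", XDRM_AUTH | XDRM_RENDER_ALLOW },
	{ "I915_GEM_PIN",         XDRM_AUTH },
};

/* A render-node client may only call entries carrying the allow flag;
 * anything else is refused (typically with -EACCES). */
static bool render_client_allowed(const struct xdrm_ioctl_desc *d)
{
	return (d->flags & XDRM_RENDER_ALLOW) != 0;
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%-22s on a render node: %s\n", table[i].name,
		       render_client_allowed(&table[i]) ? "allowed" : "refused");
	return 0;
}

On the primary node the existing DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY checks continue to apply unchanged; the new flag only marks which ioctls are considered safe to expose on the render minor.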
Some files were not shown because too many files changed in this diff