Merge branch 'drm-next' of ../main_line/linux-drm into dave-drm-next

Inki Dae, 13 years ago
commit e728519ea8
100 changed files with 16642 additions and 3803 deletions
  1. +2 -2      MAINTAINERS
  2. +3 -0      arch/x86/pci/fixup.c
  3. +4 -0      drivers/char/agp/intel-agp.c
  4. +6 -0      drivers/gpu/drm/Kconfig
  5. +3 -0      drivers/gpu/drm/Makefile
  6. +16 -0     drivers/gpu/drm/ast/Kconfig
  7. +9 -0      drivers/gpu/drm/ast/Makefile
  8. +144 -0    drivers/gpu/drm/ast/ast_dram_tables.h
  9. +244 -0    drivers/gpu/drm/ast/ast_drv.c
  10. +356 -0   drivers/gpu/drm/ast/ast_drv.h
  11. +341 -0   drivers/gpu/drm/ast/ast_fb.c
  12. +527 -0   drivers/gpu/drm/ast/ast_main.c
  13. +1160 -0  drivers/gpu/drm/ast/ast_mode.c
  14. +1780 -0  drivers/gpu/drm/ast/ast_post.c
  15. +265 -0   drivers/gpu/drm/ast/ast_tables.h
  16. +453 -0   drivers/gpu/drm/ast/ast_ttm.c
  17. +12 -0    drivers/gpu/drm/cirrus/Kconfig
  18. +5 -0     drivers/gpu/drm/cirrus/Makefile
  19. +108 -0   drivers/gpu/drm/cirrus/cirrus_drv.c
  20. +246 -0   drivers/gpu/drm/cirrus/cirrus_drv.h
  21. +307 -0   drivers/gpu/drm/cirrus/cirrus_fbdev.c
  22. +335 -0   drivers/gpu/drm/cirrus/cirrus_main.c
  23. +629 -0   drivers/gpu/drm/cirrus/cirrus_mode.c
  24. +453 -0   drivers/gpu/drm/cirrus/cirrus_ttm.c
  25. +210 -92  drivers/gpu/drm/drm_crtc.c
  26. +3 -1     drivers/gpu/drm/drm_drv.c
  27. +3 -3     drivers/gpu/drm/drm_gem.c
  28. +4 -0     drivers/gpu/drm/drm_ioctl.c
  29. +8 -10    drivers/gpu/drm/drm_vm.c
  30. +2 -1     drivers/gpu/drm/gma500/Makefile
  31. +59 -1    drivers/gpu/drm/gma500/cdv_device.c
  32. +144 -177 drivers/gpu/drm/gma500/cdv_intel_display.c
  33. +11 -6    drivers/gpu/drm/gma500/framebuffer.c
  34. +3 -9     drivers/gpu/drm/gma500/gtt.c
  35. +3 -21    drivers/gpu/drm/gma500/intel_bios.c
  36. +153 -296 drivers/gpu/drm/gma500/mdfld_device.c
  37. +9 -15    drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
  38. +102 -228 drivers/gpu/drm/gma500/mdfld_intel_display.c
  39. +60 -74   drivers/gpu/drm/gma500/oaktrail_crtc.c
  40. +91 -35   drivers/gpu/drm/gma500/oaktrail_device.c
  41. +31 -29   drivers/gpu/drm/gma500/oaktrail_hdmi.c
  42. +6 -12    drivers/gpu/drm/gma500/opregion.c
  43. +21 -1    drivers/gpu/drm/gma500/opregion.h
  44. +64 -0    drivers/gpu/drm/gma500/psb_device.c
  45. +17 -42   drivers/gpu/drm/gma500/psb_drv.c
  46. +63 -111  drivers/gpu/drm/gma500/psb_drv.h
  47. +121 -165 drivers/gpu/drm/gma500/psb_intel_display.c
  48. +1 -2     drivers/gpu/drm/gma500/psb_intel_sdvo.c
  49. +2 -0     drivers/gpu/drm/i915/Makefile
  50. +164 -115 drivers/gpu/drm/i915/i915_debugfs.c
  51. +249 -810 drivers/gpu/drm/i915/i915_dma.c
  52. +103 -62  drivers/gpu/drm/i915/i915_drv.c
  53. +64 -69   drivers/gpu/drm/i915/i915_drv.h
  54. +224 -247 drivers/gpu/drm/i915/i915_gem.c
  55. +0 -16    drivers/gpu/drm/i915/i915_gem_debug.c
  56. +17 -21   drivers/gpu/drm/i915/i915_gem_evict.c
  57. +17 -10   drivers/gpu/drm/i915/i915_gem_execbuffer.c
  58. +1 -1     drivers/gpu/drm/i915/i915_gem_gtt.c
  59. +202 -0   drivers/gpu/drm/i915/i915_gem_stolen.c
  60. +15 -3    drivers/gpu/drm/i915/i915_gem_tiling.c
  61. +187 -520 drivers/gpu/drm/i915/i915_irq.c
  62. +57 -13   drivers/gpu/drm/i915/i915_reg.h
  63. +1 -7     drivers/gpu/drm/i915/i915_suspend.c
  64. +5 -1     drivers/gpu/drm/i915/intel_crt.c
  65. +755 -0   drivers/gpu/drm/i915/intel_ddi.c
  66. +479 -164 drivers/gpu/drm/i915/intel_display.c
  67. +13 -6    drivers/gpu/drm/i915/intel_dp.c
  68. +46 -11   drivers/gpu/drm/i915/intel_drv.h
  69. +213 -71  drivers/gpu/drm/i915/intel_hdmi.c
  70. +4 -0     drivers/gpu/drm/i915/intel_i2c.c
  71. +38 -29   drivers/gpu/drm/i915/intel_opregion.c
  72. +112 -97  drivers/gpu/drm/i915/intel_overlay.c
  73. +744 -23  drivers/gpu/drm/i915/intel_pm.c
  74. +102 -68  drivers/gpu/drm/i915/intel_ringbuffer.c
  75. +11 -10   drivers/gpu/drm/i915/intel_ringbuffer.h
  76. +17 -13   drivers/gpu/drm/i915/intel_sdvo.c
  77. +17 -12   drivers/gpu/drm/i915/intel_sprite.c
  78. +1 -4     drivers/gpu/drm/i915/intel_tv.c
  79. +15 -0    drivers/gpu/drm/mgag200/Kconfig
  80. +5 -0     drivers/gpu/drm/mgag200/Makefile
  81. +116 -0   drivers/gpu/drm/mgag200/mgag200_drv.c
  82. +276 -0   drivers/gpu/drm/mgag200/mgag200_drv.h
  83. +294 -0   drivers/gpu/drm/mgag200/mgag200_fb.c
  84. +156 -0   drivers/gpu/drm/mgag200/mgag200_i2c.c
  85. +388 -0   drivers/gpu/drm/mgag200/mgag200_main.c
  86. +1533 -0  drivers/gpu/drm/mgag200/mgag200_mode.c
  87. +661 -0   drivers/gpu/drm/mgag200/mgag200_reg.h
  88. +452 -0   drivers/gpu/drm/mgag200/mgag200_ttm.c
  89. +7 -3     drivers/gpu/drm/nouveau/nouveau_state.c
  90. +3 -2     drivers/gpu/drm/radeon/Makefile
  91. +14 -2    drivers/gpu/drm/radeon/atombios_encoders.c
  92. +0 -5     drivers/gpu/drm/radeon/evergreen.c
  93. +0 -1     drivers/gpu/drm/radeon/evergreen_blit_kms.c
  94. +5 -5     drivers/gpu/drm/radeon/evergreen_cs.c
  95. +216 -0   drivers/gpu/drm/radeon/evergreen_hdmi.c
  96. +0 -5     drivers/gpu/drm/radeon/ni.c
  97. +21 -19   drivers/gpu/drm/radeon/r100.c
  98. +1 -1     drivers/gpu/drm/radeon/r200.c
  99. +2 -2     drivers/gpu/drm/radeon/r300.c
  100. +20 -22  drivers/gpu/drm/radeon/r600.c

+ 2 - 2
MAINTAINERS

@@ -2373,10 +2373,10 @@ F:	drivers/gpu/drm/
 F:	include/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M:	Keith Packard <keithp@keithp.com>
+M:	Daniel Vetter <daniel.vetter@ffwll.ch>
 L:	intel-gfx@lists.freedesktop.org (subscribers-only)
 L:	dri-devel@lists.freedesktop.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux.git
+T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
 F:	drivers/gpu/drm/i915
 F:	include/drm/i915*

+ 3 - 0
arch/x86/pci/fixup.c

@@ -6,6 +6,7 @@
 #include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/vgaarb.h>
 #include <asm/pci_x86.h>
 
 static void __devinit pci_fixup_i450nx(struct pci_dev *d)
@@ -348,6 +349,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 	if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
 		pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
 		dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
+		if (!vga_default_device())
+			vga_set_default_device(pdev);
 	}
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,

+ 4 - 0
drivers/char/agp/intel-agp.c

@@ -908,6 +908,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
 	ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
+	ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
 	{ }
 };
 

+ 6 - 0
drivers/gpu/drm/Kconfig

@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
 source "drivers/gpu/drm/gma500/Kconfig"
 
 source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"

+ 3 - 0
drivers/gpu/drm/Makefile

@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
 obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
 obj-y			+= i2c/

+ 16 - 0
drivers/gpu/drm/ast/Kconfig

@@ -0,0 +1,16 @@
+config DRM_AST
+	tristate "AST server chips"
+	depends on DRM && PCI && EXPERIMENTAL
+	select DRM_TTM
+	select FB_SYS_COPYAREA
+	select FB_SYS_FILLRECT
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	 Say yes for experimental AST GPU driver. Do not enable
+	 this driver without having a working -modesetting,
+	 and a version of AST that knows to fail if KMS
+	 is bound to the driver. These GPUs are commonly found
+	 in server chipsets.
+

+ 9 - 0
drivers/gpu/drm/ast/Makefile

@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o

+ 144 - 0
drivers/gpu/drm/ast/ast_dram_tables.h

@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+	u16 index;
+	u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+	{ 0x0108, 0x00000000 },
+	{ 0x0120, 0x00004a21 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xFFFFFFFF },
+	{ 0x0004, 0x00000089 },
+	{ 0x0008, 0x22331353 },
+	{ 0x000C, 0x0d07000b },
+	{ 0x0010, 0x11113333 },
+	{ 0x0020, 0x00110350 },
+	{ 0x0028, 0x1e0828f0 },
+	{ 0x0024, 0x00000001 },
+	{ 0x001C, 0x00000000 },
+	{ 0x0014, 0x00000003 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000131 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000031 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0028, 0x1e0828f1 },
+	{ 0x0024, 0x00000003 },
+	{ 0x002C, 0x1f0f28fb },
+	{ 0x0030, 0xFFFFFE01 },
+	{ 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x000041f0 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00050000 },
+	{ 0x0004, 0x00000585 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x22201724 },
+	{ 0x0018, 0x1e29011a },
+	{ 0x0020, 0x00c82222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x032aa02a },
+	{ 0x0064, 0x002d3000 },
+	{ 0x0068, 0x00000000 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000732 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000632 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00004c41 },
+	{ 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x00004120 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00070000 },
+	{ 0x0004, 0x00000489 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x32302926 },
+	{ 0x0018, 0x274c0122 },
+	{ 0x0020, 0x00ce2222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x0f2aa02a },
+	{ 0x0064, 0x003f3005 },
+	{ 0x0068, 0x02020202 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000942 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000842 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00005061 },
+	{ 0xffff, 0xffffffff },
+};
+
+#endif

+ 244 - 0
drivers/gpu/drm/ast/ast_drv.c

@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = PCI_VENDOR_ASPEED,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+	AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+	/*	AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+	{0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 1);
+	console_unlock();
+	return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	ast_post_gpu(dev);
+
+	drm_mode_config_reset(dev);
+	mutex_lock(&dev->mode_config.mutex);
+	drm_helper_resume_force_mode(dev);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 0);
+	console_unlock();
+	return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+	int ret;
+
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	ret = ast_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	int error;
+
+	error = ast_drm_freeze(ddev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	if (!ddev || !ddev->dev_private)
+		return -ENODEV;
+	return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+	.suspend = ast_pm_suspend,
+	.resume = ast_pm_resume,
+	.freeze = ast_pm_freeze,
+	.thaw = ast_pm_thaw,
+	.poweroff = ast_pm_poweroff,
+	.restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = ast_pci_probe,
+	.remove = ast_pci_remove,
+	.driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = ast_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+	.dev_priv_size = 0,
+
+	.load = ast_driver_load,
+	.unload = ast_driver_unload,
+
+	.fops = &ast_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_init_object = ast_gem_init_object,
+	.gem_free_object = ast_gem_free_object,
+	.dumb_create = ast_dumb_create,
+	.dumb_map_offset = ast_dumb_mmap_offset,
+	.dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && ast_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (ast_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+	drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+

+ 356 - 0
drivers/gpu/drm/ast/ast_drv.h

@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"ast"
+#define DRIVER_DESC		"AST"
+#define DRIVER_DATE		"20120228"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+	AST2000,
+	AST2100,
+	AST1100,
+	AST2200,
+	AST2150,
+	AST2300,
+	AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16   1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32   3
+#define AST_DRAM_2Gx16   6
+#define AST_DRAM_4Gx16   7
+
+struct ast_fbdev;
+
+struct ast_private {
+	struct drm_device *dev;
+
+	void __iomem *regs;
+	void __iomem *ioregs;
+
+	enum ast_chip chip;
+	bool vga2_clone;
+	uint32_t dram_bus_width;
+	uint32_t dram_type;
+	uint32_t mclk;
+	uint32_t vram_size;
+
+	struct ast_fbdev *fbdev;
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+	} ttm;
+
+	struct drm_gem_object *cursor_cache;
+	uint64_t cursor_cache_gpu_addr;
+	struct ttm_bo_kmap_obj cache_kmap;
+	int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE		(0x40)
+#define AST_IO_MISC_PORT_WRITE		(0x42)
+#define AST_IO_SEQ_PORT			(0x44)
+#define AST_DAC_INDEX_READ		(0x3c7)
+#define AST_IO_DAC_INDEX_WRITE		(0x48)
+#define AST_IO_DAC_DATA		        (0x49)
+#define AST_IO_GR_PORT			(0x4E)
+#define AST_IO_CRTC_PORT		(0x54)
+#define AST_IO_INPUT_STATUS1_READ	(0x5A)
+#define AST_IO_MISC_PORT_READ		(0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32)
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->regs + reg);\
+	}
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->ioregs + reg);\
+	}
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+				     uint32_t base, uint8_t index,
+				     uint8_t val)
+{
+	ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+			    uint32_t base, uint8_t index,
+			    uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+			  uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+			       uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M    0x00800000
+#define AST_VIDMEM_SIZE_16M   0x01000000
+#define AST_VIDMEM_SIZE_32M   0x02000000
+#define AST_VIDMEM_SIZE_64M   0x04000000
+#define AST_VIDMEM_SIZE_128M  0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE                (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE      32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM  0x00
+#define AST_HWC_SIGNATURE_SizeX     0x04
+#define AST_HWC_SIGNATURE_SizeY     0x08
+#define AST_HWC_SIGNATURE_X         0x0C
+#define AST_HWC_SIGNATURE_Y         0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX  0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY  0x18
+
+
+struct ast_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+	struct drm_connector base;
+	struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+	struct drm_crtc base;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width, cursor_height;
+	u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+	struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+	struct drm_fb_helper helper;
+	struct ast_framebuffer afb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+	u8 misc;
+	u8 seq[4];
+	u8 crtc[25];
+	u8 ar[20];
+	u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+	u32 ht;
+	u32 hde;
+	u32 hfp;
+	u32 hsync;
+	u32 vt;
+	u32 vde;
+	u32 vfp;
+	u32 vsync;
+	u32 dclk_index;
+	u32 flags;
+	u32 refresh_rate;
+	u32 refresh_rate_index;
+	u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+	u8 param1;
+	u8 param2;
+	u8 param3;
+};
+
+struct ast_vbios_mode_info {
+	struct ast_vbios_stdtable *std_table;
+	struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+			 struct ast_framebuffer *ast_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+			   struct drm_device *dev,
+			   struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+				struct drm_device *dev,
+				uint32_t handle,
+				uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif

+ 341 - 0
drivers/gpu/drm/ast/ast_fb.c

@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+
+	obj = afbdev->afb.obj;
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, true);
+	if (ret) {
+		DRM_ERROR("failed to reserve fb bo\n");
+		return;
+	}
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			ast_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i < y + height; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	ast_bo_unreserve(bo);
+}
+
+static void ast_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_fillrect(info, rect);
+	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_copyarea(info, area);
+	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_imageblit(info, image);
+	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+static struct fb_ops astfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = ast_fillrect,
+	.fb_copyarea = ast_copyarea,
+	.fb_imageblit = ast_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = ast_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_framebuffer *fb;
+	struct fb_info *info;
+	int size, ret;
+	struct device *device = &dev->pdev->dev;
+	void *sysram;
+	struct drm_gem_object *gobj = NULL;
+	struct ast_bo *bo = NULL;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+	bo = gem_to_ast_bo(gobj);
+
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->par = afbdev;
+
+	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+	if (ret)
+		goto out;
+
+	afbdev->sysram = sysram;
+	afbdev->size = size;
+
+	fb = &afbdev->afb.base;
+	afbdev->helper.fb = fb;
+	afbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "astdrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &astfb_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n",
+		      fb->width, fb->height);
+
+	return 0;
+out:
+	return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			       u16 blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	ast_crtc->lut_r[regno] = red >> 8;
+	ast_crtc->lut_g[regno] = green >> 8;
+	ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			       u16 *blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	*red = ast_crtc->lut_r[regno] << 8;
+	*green = ast_crtc->lut_g[regno] << 8;
+	*blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+					  struct drm_fb_helper_surface_size *sizes)
+{
+	struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = astfb_create(afbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+	.gamma_set = ast_fb_gamma_set,
+	.gamma_get = ast_fb_gamma_get,
+	.fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+			      struct ast_fbdev *afbdev)
+{
+	struct fb_info *info;
+	struct ast_framebuffer *afb = &afbdev->afb;
+	if (afbdev->helper.fbdev) {
+		info = afbdev->helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (afb->obj) {
+		drm_gem_object_unreference_unlocked(afb->obj);
+		afb->obj = NULL;
+	}
+	drm_fb_helper_fini(&afbdev->helper);
+
+	vfree(afbdev->sysram);
+	drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_fbdev *afbdev;
+	int ret;
+
+	afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+	if (!afbdev)
+		return -ENOMEM;
+
+	ast->fbdev = afbdev;
+	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	ret = drm_fb_helper_init(dev, &afbdev->helper,
+				 1, 1);
+	if (ret) {
+		kfree(afbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+	drm_fb_helper_initial_config(&afbdev->helper, 32);
+	return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	ast_fbdev_destroy(dev, ast->fbdev);
+	kfree(ast->fbdev);
+	ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}

+ 527 - 0
drivers/gpu/drm/ast/ast_main.c

@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+
+
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+			    uint32_t base, uint8_t index,
+			    uint8_t mask, uint8_t val)
+{
+	u8 tmp;
+	ast_io_write8(ast, base, index);
+	tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+	ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+			  uint32_t base, uint8_t index)
+{
+	uint8_t ret;
+	ast_io_write8(ast, base, index);
+	ret = ast_io_read8(ast, base + 1);
+	return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+			       uint32_t base, uint8_t index, uint8_t mask)
+{
+	uint8_t ret;
+	ast_io_write8(ast, base, index);
+	ret = ast_io_read8(ast, base + 1) & mask;
+	return ret;
+}
+
+
+static int ast_detect_chip(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (dev->pdev->device == PCI_CHIP_AST1180) {
+		ast->chip = AST1100;
+		DRM_INFO("AST 1180 detected\n");
+	} else {
+		if (dev->pdev->revision >= 0x20) {
+			ast->chip = AST2300;
+			DRM_INFO("AST 2300 detected\n");
+		} else if (dev->pdev->revision >= 0x10) {
+			uint32_t data;
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+
+			data = ast_read32(ast, 0x1207c);
+			switch (data & 0x0300) {
+			case 0x0200:
+				ast->chip = AST1100;
+				DRM_INFO("AST 1100 detected\n");
+				break;
+			case 0x0100:
+				ast->chip = AST2200;
+				DRM_INFO("AST 2200 detected\n");
+				break;
+			case 0x0000:
+				ast->chip = AST2150;
+				DRM_INFO("AST 2150 detected\n");
+				break;
+			default:
+				ast->chip = AST2100;
+				DRM_INFO("AST 2100 detected\n");
+				break;
+			}
+			ast->vga2_clone = false;
+		} else {
+			ast->chip = 2000;
+			DRM_INFO("AST 2000 detected\n");
+		}
+	}
+	return 0;
+}
+
+static int ast_get_dram_info(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	uint32_t data, data2;
+	uint32_t denum, num, div, ref_pll;
+
+	ast_write32(ast, 0xf004, 0x1e6e0000);
+	ast_write32(ast, 0xf000, 0x1);
+
+
+	ast_write32(ast, 0x10000, 0xfc600309);
+
+	do {
+		;
+	} while (ast_read32(ast, 0x10000) != 0x01);
+	data = ast_read32(ast, 0x10004);
+
+	if (data & 0x400)
+		ast->dram_bus_width = 16;
+	else
+		ast->dram_bus_width = 32;
+
+	if (ast->chip == AST2300) {
+		switch (data & 0x03) {
+		case 0:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		default:
+		case 1:
+			ast->dram_type = AST_DRAM_1Gx16;
+			break;
+		case 2:
+			ast->dram_type = AST_DRAM_2Gx16;
+			break;
+		case 3:
+			ast->dram_type = AST_DRAM_4Gx16;
+			break;
+		}
+	} else {
+		switch (data & 0x0c) {
+		case 0:
+		case 4:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		case 8:
+			if (data & 0x40)
+				ast->dram_type = AST_DRAM_1Gx16;
+			else
+				ast->dram_type = AST_DRAM_512Mx32;
+			break;
+		case 0xc:
+			ast->dram_type = AST_DRAM_1Gx32;
+			break;
+		}
+	}
+
+	data = ast_read32(ast, 0x10120);
+	data2 = ast_read32(ast, 0x10170);
+	if (data2 & 0x2000)
+		ref_pll = 14318;
+	else
+		ref_pll = 12000;
+
+	denum = data & 0x1f;
+	num = (data & 0x3fe0) >> 5;
+	data = (data & 0xc000) >> 14;
+	switch (data) {
+	case 3:
+		div = 0x4;
+		break;
+	case 2:
+	case 1:
+		div = 0x2;
+		break;
+	default:
+		div = 0x1;
+		break;
+	}
+	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	return 0;
+}
+
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+	struct ast_private *ast = dev->dev_private;
+	uint32_t dclk, jreg;
+	uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
+
+	dram_bus_width = ast->dram_bus_width;
+	mclk = ast->mclk;
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST1100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2150 ||
+	    ast->dram_bus_width == 16)
+		dram_efficency = 600;
+	else if (ast->chip == AST2300)
+		dram_efficency = 400;
+
+	dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+	actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
+
+	if (ast->chip == AST1180)
+		dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+	else {
+		jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+		if ((jreg & 0x08) && (ast->chip == AST2000))
+			dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+		else if ((jreg & 0x08) && (bpp == 8))
+			dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+		else
+			dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+	}
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2300 ||
+	    ast->chip == AST1180) {
+		if (dclk > 200)
+			dclk = 200;
+	} else {
+		if (dclk > 165)
+			dclk = 165;
+	}
+
+	return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+	if (ast_fb->obj)
+		drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+					      struct drm_file *file,
+					      unsigned int *handle)
+{
+	return -EINVAL;
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+	.destroy = ast_user_framebuffer_destroy,
+	.create_handle = ast_user_framebuffer_create_handle,
+};
+
+
+int ast_framebuffer_init(struct drm_device *dev,
+			 struct ast_framebuffer *ast_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_gem_object *obj)
+{
+	int ret;
+
+	ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+	if (ret) {
+		DRM_ERROR("framebuffer init failed %d\n", ret);
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+	ast_fb->obj = obj;
+	return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+	       struct drm_file *filp,
+	       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct ast_framebuffer *ast_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+	if (!ast_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(ast_fb);
+		return ERR_PTR(ret);
+	}
+	return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+	.fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 jreg;
+
+	ast_open_key(ast);
+
+	jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+	switch (jreg & 3) {
+	case 0: return AST_VIDMEM_SIZE_8M;
+	case 1: return AST_VIDMEM_SIZE_16M;
+	case 2: return AST_VIDMEM_SIZE_32M;
+	case 3: return AST_VIDMEM_SIZE_64M;
+	}
+	return AST_VIDMEM_DEFAULT_SIZE;
+}
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct ast_private *ast;
+	int ret = 0;
+
+	ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+	if (!ast)
+		return -ENOMEM;
+
+	dev->dev_private = ast;
+	ast->dev = dev;
+
+	ast->regs = pci_iomap(dev->pdev, 1, 0);
+	if (!ast->regs) {
+		ret = -EIO;
+		goto out_free;
+	}
+	ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+	if (!ast->ioregs) {
+		ret = -EIO;
+		goto out_free;
+	}
+
+	ast_detect_chip(dev);
+
+	if (ast->chip != AST1180) {
+		ast_get_dram_info(dev);
+		ast->vram_size = ast_get_vram_info(dev);
+		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+	}
+
+	ret = ast_mm_init(ast);
+	if (ret)
+		goto out_free;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = (void *)&ast_mode_funcs;
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2300 ||
+	    ast->chip == AST1180) {
+		dev->mode_config.max_width = 1920;
+		dev->mode_config.max_height = 2048;
+	} else {
+		dev->mode_config.max_width = 1600;
+		dev->mode_config.max_height = 1200;
+	}
+
+	ret = ast_mode_init(dev);
+	if (ret)
+		goto out_free;
+
+	ret = ast_fbdev_init(dev);
+	if (ret)
+		goto out_free;
+
+	return 0;
+out_free:
+	kfree(ast);
+	dev->dev_private = NULL;
+	return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	ast_mode_fini(dev);
+	ast_fbdev_fini(dev);
+	drm_mode_config_cleanup(dev);
+
+	ast_mm_fini(ast);
+	pci_iounmap(dev->pdev, ast->ioregs);
+	pci_iounmap(dev->pdev, ast->regs);
+	kfree(ast);
+	return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct ast_bo *astbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = ast_bo_create(dev, size, 0, 0, &astbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &astbo->gem;
+	return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = ast_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+	struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+	if (!ast_bo)
+		return;
+	ast_bo_unref(&ast_bo);
+}
+
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct ast_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_ast_bo(obj);
+	*offset = ast_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}
+

+ 1160 - 0
drivers/gpu/drm/ast/ast_mode.c

@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+			  struct drm_file *file_priv,
+			  uint32_t handle,
+			  uint32_t width,
+			  uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+			   int x, int y);
+
+static inline void ast_load_palette_index(struct ast_private *ast,
+				     u8 index, u8 red, u8 green,
+				     u8 blue)
+{
+	ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, red);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, green);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	for (i = 0; i < 256; i++)
+		ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+				       ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode,
+				    struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+	u32 hborder, vborder;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+		color_index = VGAModeIndex - 1;
+		break;
+	case 16:
+		vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+		color_index = HiCModeIndex;
+		break;
+	case 24:
+	case 32:
+		vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+		color_index = TrueCModeIndex;
+		break;
+	default:
+		return false;
+	}
+
+	switch (crtc->mode.crtc_hdisplay) {
+	case 640:
+		vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+		break;
+	case 800:
+		vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+		break;
+	case 1024:
+		vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+		break;
+	case 1280:
+		if (crtc->mode.crtc_vdisplay == 800)
+			vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+		else
+			vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+		break;
+	case 1440:
+		vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+		break;
+	case 1600:
+		vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+		break;
+	case 1680:
+		vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+		break;
+	case 1920:
+		if (crtc->mode.crtc_vdisplay == 1080)
+			vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+		else
+			vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+		break;
+	default:
+		return false;
+	}
+
+	refresh_rate = drm_mode_vrefresh(mode);
+	while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+		vbios_mode->enh_table++;
+		if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+		    (vbios_mode->enh_table->refresh_rate == 0xff)) {
+			vbios_mode->enh_table--;
+			break;
+		}
+	}
+
+	hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+	vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+	adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+	adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+	adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+	adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+		vbios_mode->enh_table->hfp;
+	adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+					 vbios_mode->enh_table->hfp +
+					 vbios_mode->enh_table->hsync);
+
+	adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+	adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+	adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+	adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+		vbios_mode->enh_table->vfp;
+	adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+					 vbios_mode->enh_table->vfp +
+					 vbios_mode->enh_table->vsync);
+
+	refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+	mode_id = vbios_mode->enh_table->mode_id;
+
+	if (ast->chip == AST1180) {
+		/* TODO 1180 */
+	} else {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+	}
+
+	return true;
+
+
+}
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			    struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_vbios_stdtable *stdtable;
+	u32 i;
+	u8 jreg;
+
+	stdtable = vbios_mode->std_table;
+
+	jreg = stdtable->misc;
+	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+	/* Set SEQ */
+	ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+	for (i = 0; i < 4; i++) {
+		jreg = stdtable->seq[i];
+		if (!i)
+			jreg |= 0x20;
+		ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+	}
+
+	/* Set CRTC */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+	for (i = 0; i < 25; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+	/* set AR */
+	jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+	for (i = 0; i < 20; i++) {
+		jreg = stdtable->ar[i];
+		ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+		ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+	}
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+	jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+	/* Set GR */
+	for (i = 0; i < 9; i++)
+		ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+	u16 temp;
+
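+	/*
+	 * The standard VGA CRTC registers hold only the low 8 bits of each
+	 * timing value; the overflow bits are collected in jreg05/07/09 and
+	 * in the AST extension registers 0xAC/0xAD/0xAE programmed below.
+	 */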
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+	temp = (mode->crtc_htotal >> 3) - 5;
+	if (temp & 0x100)
+		jregAC |= 0x01; /* HT D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+	temp = (mode->crtc_hdisplay >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x04; /* HDE D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+	temp = (mode->crtc_hblank_start >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x10; /* HBS D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+	temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+	if (temp & 0x20)
+		jreg05 |= 0x80;  /* HBE D[5] */
+	if (temp & 0x40)
+		jregAD |= 0x01;  /* HBE D[5] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+	temp = (mode->crtc_hsync_start >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x40; /* HRS D[5] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+	temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+	if (temp & 0x20)
+		jregAD |= 0x04; /* HRE D[5] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+	/* vert timings */
+	temp = (mode->crtc_vtotal) - 2;
+	if (temp & 0x100)
+		jreg07 |= 0x01;
+	if (temp & 0x200)
+		jreg07 |= 0x20;
+	if (temp & 0x400)
+		jregAE |= 0x01;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+	temp = (mode->crtc_vsync_start) - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x04;
+	if (temp & 0x200)
+		jreg07 |= 0x80;
+	if (temp & 0x400)
+		jregAE |= 0x08;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+	temp = (mode->crtc_vsync_end - 1) & 0x3f;
+	if (temp & 0x10)
+		jregAE |= 0x20;
+	if (temp & 0x20)
+		jregAE |= 0x40;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+	temp = mode->crtc_vdisplay - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x02;
+	if (temp & 0x200)
+		jreg07 |= 0x40;
+	if (temp & 0x400)
+		jregAE |= 0x02;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+	temp = mode->crtc_vblank_start - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x08;
+	if (temp & 0x200)
+		jreg09 |= 0x20;
+	if (temp & 0x400)
+		jregAE |= 0x04;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+	temp = mode->crtc_vblank_end - 1;
+	if (temp & 0x100)
+		jregAE |= 0x10;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+
+	u16 offset;
+
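+	/* the offset register is programmed in units of 8 bytes; the high
+	 * bits go into extension register 0xb0 */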
+	offset = crtc->fb->pitches[0] >> 3;
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_vbios_dclk_info *clk_info;
+
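+	/* program the pixel clock PLL from the DCLK table entry selected by
+	 * the VBIOS mode (extension registers 0xc0/0xc1/0xbb) */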
+	clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+			       (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		jregA0 = 0x70;
+		jregA3 = 0x01;
+		jregA8 = 0x00;
+		break;
+	case 15:
+	case 16:
+		jregA0 = 0x70;
+		jregA3 = 0x04;
+		jregA8 = 0x02;
+		break;
+	case 32:
+		jregA0 = 0x70;
+		jregA3 = 0x08;
+		jregA8 = 0x02;
+		break;
+	}
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+	/* Set Threshold */
+	if (ast->chip == AST2300) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+	} else if (ast->chip == AST2100 ||
+		   ast->chip == AST1100 ||
+		   ast->chip == AST2200 ||
+		   ast->chip == AST2150) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+	} else {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+	}
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+		      struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 jreg;
+
+	jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+	jreg |= (vbios_mode->enh_table->flags & SyncNN);
+	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		     struct ast_vbios_mode_info *vbios_mode)
+{
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u32 addr;
+
+	addr = offset >> 2;
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+
+	if (ast->chip == AST1180)
+		return;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+		ast_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_OFF:
+		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+		break;
+	}
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct ast_framebuffer *ast_fb;
+	struct ast_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		ast_fb = to_ast_framebuffer(fb);
+		obj = ast_fb->obj;
+		bo = gem_to_ast_bo(obj);
+		ret = ast_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		ast_bo_push_sysram(bo);
+		ast_bo_unreserve(bo);
+	}
+
+	ast_fb = to_ast_framebuffer(crtc->fb);
+	obj = ast_fb->obj;
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		ast_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&ast->fbdev->afb == ast_fb) {
+		/* if this is the fbdev console framebuffer, kmap it so fbcon can draw into it */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+	}
+	ast_bo_unreserve(bo);
+
+	ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+	return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode,
+			     int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_vbios_mode_info vbios_mode;
+	bool ret;
+	if (ast->chip == AST1180) {
+		DRM_ERROR("AST 1180 modesetting not supported\n");
+		return -EINVAL;
+	}
+
+	ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+	if (!ret)
+		return -EINVAL;
+	ast_open_key(ast);
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_offset_reg(crtc);
+	ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+	ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+	ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+	ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+	return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+	.dpms = ast_crtc_dpms,
+	.mode_fixup = ast_crtc_mode_fixup,
+	.mode_set = ast_crtc_mode_set,
+	.mode_set_base = ast_crtc_mode_set_base,
+	.disable = ast_crtc_disable,
+	.load_lut = ast_crtc_load_lut,
+	.prepare = ast_crtc_prepare,
+	.commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t start, uint32_t size)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	int end = (start + size > 256) ? 256 : start + size, i;
+
+	/* userspace palettes are always correct as is */
+	for (i = start; i < end; i++) {
+		ast_crtc->lut_r[i] = red[i] >> 8;
+		ast_crtc->lut_g[i] = green[i] >> 8;
+		ast_crtc->lut_b[i] = blue[i] >> 8;
+	}
+	ast_crtc_load_lut(crtc);
+}
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+	.cursor_set = ast_cursor_set,
+	.cursor_move = ast_cursor_move,
+	.reset = ast_crtc_reset,
+	.set_config = drm_crtc_helper_set_config,
+	.gamma_set = ast_crtc_gamma_set,
+	.destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+	struct ast_crtc *crtc;
+	int i;
+
+	crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+	if (!crtc)
+		return -ENOMEM;
+
+	drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+	drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+	for (i = 0; i < 256; i++) {
+		crtc->lut_r[i] = i;
+		crtc->lut_g[i] = i;
+		crtc->lut_b[i] = i;
+	}
+	return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+	.destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+			   struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+	.dpms = ast_encoder_dpms,
+	.mode_fixup = ast_mode_fixup,
+	.prepare = ast_encoder_prepare,
+	.commit = ast_encoder_commit,
+	.mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+	struct ast_encoder *ast_encoder;
+
+	ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+	if (!ast_encoder)
+		return -ENOMEM;
+
+	drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+	ast_encoder->base.possible_crtcs = 1;
+	return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+	struct ast_connector *ast_connector = to_ast_connector(connector);
+	struct edid *edid;
+	int ret;
+
+	edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+		return ret;
+	} else
+		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+	return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+			  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+	struct ast_connector *ast_connector = to_ast_connector(connector);
+	ast_i2c_destroy(ast_connector->i2c);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+	.mode_valid = ast_mode_valid,
+	.get_modes = ast_get_modes,
+	.best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = ast_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+	struct ast_connector *ast_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+	if (!ast_connector)
+		return -ENOMEM;
+
+	connector = &ast_connector->base;
+	drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_sysfs_connector_add(connector);
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ast_connector->i2c = ast_i2c_create(dev);
+	if (!ast_connector->i2c)
+		DRM_ERROR("failed to add ddc bus for connector\n");
+
+	return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	int size;
+	int ret;
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	uint64_t gpu_addr;
+
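+	/* the cache holds AST_DEFAULT_HWC_NUM cursor slots, each consisting
+	 * of the image area followed by a small signature block (checksum,
+	 * size, hotspot and position) */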
+	size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+	ret = ast_gem_create(dev, size, true, &obj);
+	if (ret)
+		return ret;
+	bo = gem_to_ast_bo(obj);
+	ret = ast_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		goto fail;
+
+	ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	ast_bo_unreserve(bo);
+	if (ret)
+		goto fail;
+
+	/* kmap the object */
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+	if (ret)
+		goto fail;
+
+	ast->cursor_cache = obj;
+	ast->cursor_cache_gpu_addr = gpu_addr;
+	DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+	return 0;
+fail:
+	return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	ttm_bo_kunmap(&ast->cache_kmap);
+	drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+	ast_cursor_init(dev);
+	ast_crtc_init(dev);
+	ast_encoder_init(dev);
+	ast_connector_init(dev);
+	return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+	ast_cursor_fini(dev);
+}
+
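+/*
+ * DDC is bit-banged through CRTC extension register 0xb7: SCL is driven
+ * (inverted) via bit 0 and sensed on bit 4, SDA is driven (inverted) via
+ * bit 2 and sensed on bit 5.  The setters below retry until the drive bit
+ * reads back as written.
+ */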
+static int get_clock(void *i2c_priv)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	uint32_t val;
+
+	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+	return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	uint32_t val;
+
+	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+	return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	int i;
+	u8 ujcrb7, jtemp;
+
+	for (i = 0; i < 0x10000; i++) {
+		ujcrb7 = ((clock & 0x01) ? 0 : 1);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+		if (ujcrb7 == jtemp)
+			break;
+	}
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	int i;
+	u8 ujcrb7, jtemp;
+
+	for (i = 0; i < 0x10000; i++) {
+		ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+		if (ujcrb7 == jtemp)
+			break;
+	}
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+	struct ast_i2c_chan *i2c;
+	int ret;
+
+	i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+		 "AST i2c bit bus");
+	i2c->adapter.algo_data = &i2c->bit;
+
+	i2c->bit.udelay = 20;
+	i2c->bit.timeout = 2;
+	i2c->bit.data = i2c;
+	i2c->bit.setsda = set_data;
+	i2c->bit.setscl = set_clock;
+	i2c->bit.getsda = get_data;
+	i2c->bit.getscl = get_clock;
+	ret = i2c_bit_add_bus(&i2c->adapter);
+	if (ret) {
+		DRM_ERROR("Failed to register bit i2c\n");
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jreg;
+
+	jreg = 0x2;
+	/* enable ARGB cursor */
+	jreg |= 1;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
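+/*
+ * Convert a 32-bit ARGB cursor image to the hardware's 16-bit 4:4:4:4
+ * format by keeping the high nibble of each channel.  The image is packed
+ * into the bottom-right corner of the fixed-size HWC buffer, and a checksum
+ * of the converted data is returned for the signature block.
+ */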
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+	union {
+		u32 ul;
+		u8 b[4];
+	} srcdata32[2], data32;
+	union {
+		u16 us;
+		u8 b[2];
+	} data16;
+	u32 csum = 0;
+	s32 alpha_dst_delta, last_alpha_dst_delta;
+	u8 *srcxor, *dstxor;
+	int i, j;
+	u32 per_pixel_copy, two_pixel_copy;
+
+	alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+	last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+	srcxor = src;
+	dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+	per_pixel_copy = width & 1;
+	two_pixel_copy = width >> 1;
+
+	for (j = 0; j < height; j++) {
+		for (i = 0; i < two_pixel_copy; i++) {
+			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+			srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+			/* first pixel from srcdata32[0], second from srcdata32[1] */
+			data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+			data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+			writel(data32.ul, dstxor);
+			csum += data32.ul;
+
+			dstxor += 4;
+			srcxor += 8;
+
+		}
+
+		for (i = 0; i < per_pixel_copy; i++) {
+			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+			data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+			data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+			writew(data16.us, dstxor);
+			csum += (u32)data16.us;
+
+			dstxor += 2;
+			srcxor += 4;
+		}
+		dstxor += last_alpha_dst_delta;
+	}
+	return csum;
+}
+
+static int ast_cursor_set(struct drm_crtc *crtc,
+			  struct drm_file *file_priv,
+			  uint32_t handle,
+			  uint32_t width,
+			  uint32_t height)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	uint64_t gpu_addr;
+	u32 csum;
+	int ret;
+	struct ttm_bo_kmap_obj uobj_map;
+	u8 *src, *dst;
+	bool src_isiomem, dst_isiomem;
+	if (!handle) {
+		ast_hide_cursor(crtc);
+		return 0;
+	}
+
+	if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+		return -ENOENT;
+	}
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, false);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+
+	src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+	dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+	if (src_isiomem)
+		DRM_ERROR("src cursor bo should be in main memory\n");
+	if (!dst_isiomem)
+		DRM_ERROR("dst bo should be in VRAM\n");
+
+	dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+	/* do data transfer to cursor cache */
+	csum = copy_cursor_image(src, dst, width, height);
+
+	/* write checksum + signature */
+	ttm_bo_kunmap(&uobj_map);
+	ast_bo_unreserve(bo);
+	{
+		u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+		writel(csum, dst);
+		writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+		writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+		writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+		writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+		/* set pattern offset */
+		gpu_addr = ast->cursor_cache_gpu_addr;
+		gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+		gpu_addr >>= 3;
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+	}
+	ast_crtc->cursor_width = width;
+	ast_crtc->cursor_height = height;
+	ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+	ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+	ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+	ast_show_cursor(crtc);
+
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
+fail:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+			   int x, int y)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	struct ast_private *ast = crtc->dev->dev_private;
+	int x_offset, y_offset;
+	u8 *sig;
+
+	sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+	writel(x, sig + AST_HWC_SIGNATURE_X);
+	writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+	x_offset = ast_crtc->offset_x;
+	y_offset = ast_crtc->offset_y;
+	if (x < 0) {
+		x_offset = (-x) + ast_crtc->offset_x;
+		x = 0;
+	}
+
+	if (y < 0) {
+		y_offset = (-y) + ast_crtc->offset_y;
+		y = 0;
+	}
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+	/* dummy write to fire HWC */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+	return 0;
+}

+ 1780 - 0
drivers/gpu/drm/ast/ast_post.c

@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "drmP.h"
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	ast_io_write8(ast, 0x43, 0x01);
+	ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 ch;
+
+	if (ast->chip == AST1180) {
+		/* TODO 1180 */
+	} else {
+		ch = ast_io_read8(ast, 0x43);
+		if (ch) {
+			ast_open_key(ast);
+			ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+			return ch & 0x04;
+		}
+	}
+	return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 i, index, reg;
+	const u8 *ext_reg_info;
+
+	/* reset scratch */
+	for (i = 0x81; i <= 0x8f; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+	if (ast->chip == AST2300) {
+		if (dev->pdev->revision >= 0x20)
+			ext_reg_info = extreginfo_ast2300;
+		else
+			ext_reg_info = extreginfo_ast2300a0;
+	} else
+		ext_reg_info = extreginfo;
+
+	index = 0xa0;
+	while (*ext_reg_info != 0xff) {
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+		index++;
+		ext_reg_info++;
+	}
+
+	/* disable standard IO/MEM decode if secondary */
+	/* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+	/* Set Ext. Default */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+	/* Enable RAMDAC for A1 */
+	reg = 0x04;
+	if (ast->chip == AST2300)
+		reg |= 0x20;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
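+/*
+ * Indirect access to the SOC/memory-controller registers: the high 16 bits
+ * of the register address select a 64KB window via 0xf004, and the register
+ * is then read or written at 0x10000 plus the low 16 bits.
+ */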
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+	ast_write32(ast, 0xf004, r & 0xffff0000);
+	ast_write32(ast, 0xf000, 0x1);
+
+	return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+	ast_write32(ast, 0xf004, r & 0xffff0000);
+	ast_write32(ast, 0xf000, 0x1);
+	ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150	     ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150          5
+#define CBR_THRESHOLD_AST2150        10
+#define CBR_THRESHOLD2_AST2150       10
+#define TIMEOUT_AST2150              5000000
+
+#define CBR_PATNUM_AST2150           8
+
+static const u32 pattern_AST2150[14] = {
+	0xFF00FF00,
+	0xCC33CC33,
+	0xAA55AA55,
+	0xFFFE0001,
+	0x683501FE,
+	0x0F1929B0,
+	0x2D0B4346,
+	0x60767F02,
+	0x6FBE36A6,
+	0x3A253035,
+	0x3019686D,
+	0x41C6167E,
+	0x620152BF,
+	0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (mmctestburst2_ast2150(ast, i))
+			return 0;
+	return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+	u32 patcnt, loop;
+
+	for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+			if (cbrtest_ast2150(ast))
+				break;
+		}
+		if (loop == CBR_PASSNUM_AST2150)
+			return 0;
+	}
+	return 1;
+}
+
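+/*
+ * DLL calibration by read: sweep the read DQS delay, record the window of
+ * delay values for which all test patterns pass, and program a point
+ * roughly 7/16 of the way into that window.
+ */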
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+	u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+	dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+	dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+	passcnt = 0;
+
+	for (dlli = 0; dlli < 100; dlli++) {
+		moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+		data = cbrscan_ast2150(ast, busw);
+		if (data != 0) {
+			if (data & 0x1) {
+				if (dll_min[0] > dlli)
+					dll_min[0] = dlli;
+				if (dll_max[0] < dlli)
+					dll_max[0] = dlli;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD_AST2150)
+			goto cbr_start;
+	}
+	if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+		goto cbr_start;
+
+	dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+	moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
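+/*
+ * Legacy (pre-AST2300) DRAM init: only performed when bit 7 of scratch
+ * register 0xd0 is clear (the "VGA only" case, i.e. memory presumably not
+ * yet initialized by firmware).  The chip-specific register/value table is
+ * then replayed, with index 0xff00 acting as a delay entry and 0xffff
+ * terminating the table.
+ */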
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 j;
+	u32 data, temp, i;
+	const struct ast_dramstruct *dram_reg_info;
+
+	j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+	if ((j & 0x80) == 0) { /* VGA only */
+		if (ast->chip == AST2000) {
+			dram_reg_info = ast2000_dram_table_data;
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			ast_write32(ast, 0x10100, 0xa8);
+
+			do {
+				;
+			} while (ast_read32(ast, 0x10100) != 0xa8);
+		} else {/* AST2100/1100 */
+			if (ast->chip == AST2100 || ast->chip == AST2200)
+				dram_reg_info = ast2100_dram_table_data;
+			else
+				dram_reg_info = ast1100_dram_table_data;
+
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			ast_write32(ast, 0x12000, 0x1688A8A8);
+			do {
+				;
+			} while (ast_read32(ast, 0x12000) != 0x01);
+
+			ast_write32(ast, 0x10000, 0xfc600309);
+			do {
+				;
+			} while (ast_read32(ast, 0x10000) != 0x01);
+		}
+
+		while (dram_reg_info->index != 0xffff) {
+			if (dram_reg_info->index == 0xff00) {/* delay fn */
+				for (i = 0; i < 15; i++)
+					udelay(dram_reg_info->data);
+			} else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+				data = dram_reg_info->data;
+				if (ast->dram_type == AST_DRAM_1Gx16)
+					data = 0x00000d89;
+				else if (ast->dram_type == AST_DRAM_1Gx32)
+					data = 0x00000c8d;
+
+				temp = ast_read32(ast, 0x12070);
+				temp &= 0xc;
+				temp <<= 2;
+				ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+			} else
+				ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+			dram_reg_info++;
+		}
+
+		/* AST 2100/2150 DRAM calibration */
+		data = ast_read32(ast, 0x10120);
+		if (data == 0x5061) { /* 266Mhz */
+			data = ast_read32(ast, 0x10004);
+			if (data & 0x40)
+				cbrdlli_ast2150(ast, 16); /* 16 bits */
+			else
+				cbrdlli_ast2150(ast, 32); /* 32 bits */
+		}
+
+		switch (ast->chip) {
+		case AST2000:
+			temp = ast_read32(ast, 0x10140);
+			ast_write32(ast, 0x10140, temp | 0x40);
+			break;
+		case AST1100:
+		case AST2100:
+		case AST2200:
+		case AST2150:
+			temp = ast_read32(ast, 0x1200c);
+			ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+			temp = ast_read32(ast, 0x12040);
+			ast_write32(ast, 0x12040, temp | 0x40);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* wait ready */
+	do {
+		j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	} while ((j & 0x40) == 0);
+}
+
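+/*
+ * POST the GPU: enable PCI I/O and memory decode in the command register,
+ * enable VGA, unlock the extended registers, program the extension register
+ * defaults and run the generation-specific DRAM initialization.
+ */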
+void ast_post_gpu(struct drm_device *dev)
+{
+	u32 reg;
+	struct ast_private *ast = dev->dev_private;
+
+	pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+	reg |= 0x3;
+	pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+	ast_enable_vga(dev);
+	ast_open_key(ast);
+	ast_set_def_ext_reg(dev);
+
+	if (ast->chip == AST2300)
+		ast_init_dram_2300(dev);
+	else
+		ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+	u32 dram_type;
+	u32 dram_chipid;
+	u32 dram_freq;
+	u32 vram_size;
+	u32 odt;
+	u32 wodt;
+	u32 rodt;
+	u32 dram_config;
+	u32 reg_PERIOD;
+	u32 reg_MADJ;
+	u32 reg_SADJ;
+	u32 reg_MRS;
+	u32 reg_EMRS;
+	u32 reg_AC1;
+	u32 reg_AC2;
+	u32 reg_DQSIC;
+	u32 reg_DRV;
+	u32 reg_IOZ;
+	u32 reg_DQIDLY;
+	u32 reg_FREQ;
+	u32 madj_max;
+	u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1            ((4  << 10) - 1)
+#define CBR_SIZE2            ((64 << 10) - 1)
+#define CBR_PASSNUM          5
+#define CBR_PASSNUM2         5
+#define CBR_THRESHOLD        10
+#define CBR_THRESHOLD2       10
+#define TIMEOUT              5000000
+#define CBR_PATNUM           8
+
+static const u32 pattern[8] = {
+	0xFF00FF00,
+	0xCC33CC33,
+	0xAA55AA55,
+	0x88778877,
+	0x92CC4D6E,
+	0x543D3CDE,
+	0xF1E843C7,
+	0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		if (data & 0x2000) {
+			return 0;
+		}
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return -1;
+		}
+	} while (!data);
+	data = mindwm(ast, 0x1e6e0078);
+	data = (data | (data >> 16)) & 0xffff;
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return data;
+}
+
+#if 0 /* Unused in DDX here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		if (data & 0x2000)
+			return 0;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return 0;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return -1;
+		}
+	} while (!data);
+	data = mindwm(ast, 0x1e6e0078);
+	data = (data | (data >> 16)) & 0xffff;
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+	u32 data;
+	int i;
+	data = mmc_test_single2(ast, 0);
+	if ((data & 0xff) && (data & 0xff00))
+		return 0;
+	for (i = 0; i < 8; i++) {
+		data = mmc_test_burst2(ast, i);
+		if ((data & 0xff) && (data & 0xff00))
+			return 0;
+	}
+	if (!data)
+		return 3;
+	else if (data & 0xff)
+		return 2;
+	return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+	u32 data, data2, patcnt, loop;
+
+	data2 = 3;
+	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+			if ((data = cbr_test(ast)) != 0) {
+				data2 &= data;
+				if (!data2)
+					return 0;
+				break;
+			}
+		}
+		if (loop == CBR_PASSNUM2)
+			return 0;
+	}
+	return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+	u32 data;
+
+	data = mmc_test_burst2(ast, 0);
+	if (data == 0xffff)
+		return 0;
+	data |= mmc_test_single2(ast, 0);
+	if (data == 0xffff)
+		return 0;
+
+	return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+	u32 data, data2, patcnt, loop;
+
+	data2 = 0xffff;
+	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+			if ((data = cbr_test2(ast)) != 0) {
+				data2 &= data;
+				if (!data)
+					return 0;
+				break;
+			}
+		}
+		if (loop == CBR_PASSNUM2)
+			return 0;
+	}
+	return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+	gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+	gold_sadj[1] = gold_sadj[0] >> 8;
+	gold_sadj[0] = gold_sadj[0] & 0xff;
+	gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+	gold_sadj[1] = gold_sadj[0];
+
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD) {
+			break;
+		}
+	}
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 3) {
+					dlli = 3;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[1] >= dlli) {
+				dlli = (gold_sadj[1] - dlli) >> 1;
+				if (dlli > 3) {
+					dlli = 3;
+				} else {
+					dlli = (dlli - 1) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[1]) >> 1;
+				dlli += 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
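+/*
+ * Per-bit DQ input delay training: sweep the DQS input delay, record the
+ * passing window for each of the 16 DQ bits, and derive per-bit fine-tune
+ * codes that are written to 0x1E6E0080/0x1E6E0084.
+ */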
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD2) {
+			break;
+		}
+	}
+	gold_sadj[0] = 0x0;
+	passcnt = 0;
+	for (cnt = 0; cnt < 16; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			gold_sadj[0] += dllmin[cnt];
+			passcnt++;
+		}
+	}
+	if (passcnt != 16) {
+		goto FINETUNE_START;
+	}
+	gold_sadj[0] = gold_sadj[0] >> 4;
+	gold_sadj[1] = gold_sadj[0];
+
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = dllmin[cnt];
+			if (gold_sadj[0] >= dlli) {
+				dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+				if (dlli > 3) {
+					dlli = 3;
+				}
+			} else {
+				dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = dllmin[cnt];
+			if (gold_sadj[1] >= dlli) {
+				dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+				if (dlli > 3) {
+					dlli = 3;
+				} else {
+					dlli = (dlli - 1) & 0x7;
+				}
+			} else {
+				dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+				dlli += 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD2) {
+			break;
+		}
+	}
+	gold_sadj[0] = 0x0;
+	gold_sadj[1] = 0xFF;
+	for (cnt = 0; cnt < 8; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			if (gold_sadj[0] < dllmin[cnt]) {
+				gold_sadj[0] = dllmin[cnt];
+			}
+			if (gold_sadj[1] > dllmax[cnt]) {
+				gold_sadj[1] = dllmax[cnt];
+			}
+		}
+	}
+	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+	gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		data2 = gold_sadj[1] & 0x7;
+		gold_sadj[1] >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 3) {
+					data2 = (data2 + dlli) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 4) {
+					data2 = (data2 - dlli) & 0x7;
+				}
+			}
+		}
+		data |= data2 << 21;
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	gold_sadj[0] = 0x0;
+	gold_sadj[1] = 0xFF;
+	for (cnt = 8; cnt < 16; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			if (gold_sadj[0] < dllmin[cnt]) {
+				gold_sadj[0] = dllmin[cnt];
+			}
+			if (gold_sadj[1] > dllmax[cnt]) {
+				gold_sadj[1] = dllmax[cnt];
+			}
+		}
+	}
+	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+	gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		data2 = gold_sadj[1] & 0x7;
+		gold_sadj[1] >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 3) {
+					data2 = (data2 + dlli) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 4) {
+					data2 = (data2 - dlli) & 0x7;
+				}
+			}
+		}
+		data |= data2 << 21;
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+	finetuneDQI_L(ast, param);
+	finetuneDQI_L2(ast, param);
+
+CBR_START2:
+	dllmin[0] = dllmin[1] = 0xff;
+	dllmax[0] = dllmax[1] = 0x0;
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+		data = cbr_scan(ast);
+		if (data != 0) {
+			if (data & 0x1) {
+				if (dllmin[0] > dlli) {
+					dllmin[0] = dlli;
+				}
+				if (dllmax[0] < dlli) {
+					dllmax[0] = dlli;
+				}
+			}
+			if (data & 0x2) {
+				if (dllmin[1] > dlli) {
+					dllmin[1] = dlli;
+				}
+				if (dllmax[1] < dlli) {
+					dllmax[1] = dlli;
+				}
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD) {
+			break;
+		}
+	}
+	if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+		goto CBR_START2;
+	}
+	if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+		goto CBR_START2;
+	}
+	dlli  = (dllmin[1] + dllmax[1]) >> 1;
+	dlli <<= 8;
+	dlli += (dllmin[0] + dllmax[0]) >> 1;
+	moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+	data  = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+	data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+	moutdwm(ast, 0x1E6E0018, data2);
+	moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+	/* Wait DQSI latch phase calibration */
+	moutdwm(ast, 0x1E6E0074, 0x00000010);
+	moutdwm(ast, 0x1E6E0070, 0x00000003);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x00000003);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 trap, trap_AC2, trap_MRS;
+
+	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap_AC2  = 0x00020000 + (trap << 16);
+	trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+	trap_MRS  = 0x00000010 + (trap << 4);
+	trap_MRS |= ((trap & 0x2) << 18);
+
+	param->reg_MADJ       = 0x00034C4C;
+	param->reg_SADJ       = 0x00001800;
+	param->reg_DRV        = 0x000000F0;
+	param->reg_PERIOD     = param->dram_freq;
+	param->rodt           = 0;
+
+	switch (param->dram_freq) {
+	case 336:
+		moutdwm(ast, 0x1E6E2020, 0x0190);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x22202725;
+		param->reg_AC2       = 0xAA007613 | trap_AC2;
+		param->reg_DQSIC     = 0x000000BA;
+		param->reg_MRS       = 0x04001400 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000074;
+		param->reg_FREQ      = 0x00004DC0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 3;
+		break;
+	default:
+	case 396:
+		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302825;
+		param->reg_AC2       = 0xCC009617 | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x04001600 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC009617 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC009622 | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00963F | trap_AC2;
+			break;
+		}
+		break;
+
+	case 408:
+		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302825;
+		param->reg_AC2       = 0xCC009617 | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x04001600 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC009617 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC009622 | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00963F | trap_AC2;
+			break;
+		}
+
+		break;
+	case 456:
+		moutdwm(ast, 0x1E6E2020, 0x0230);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xCD44961A;
+		param->reg_DQSIC     = 0x000000FC;
+		param->reg_MRS       = 0x00081830;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x00000097;
+		param->reg_FREQ      = 0x000052C0;
+		param->madj_max      = 88;
+		param->dll2_finetune_step = 4;
+		break;
+	case 504:
+		moutdwm(ast, 0x1E6E2020, 0x0270);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xDE44A61D;
+		param->reg_DQSIC     = 0x00000117;
+		param->reg_MRS       = 0x00081A30;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x070000BB;
+		param->reg_DQIDLY    = 0x000000A0;
+		param->reg_FREQ      = 0x000054C0;
+		param->madj_max      = 79;
+		param->dll2_finetune_step = 4;
+		break;
+	case 528:
+		moutdwm(ast, 0x1E6E2020, 0x0290);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xEF44B61E;
+		param->reg_DQSIC     = 0x00000125;
+		param->reg_MRS       = 0x00081A30;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000088;
+		param->reg_FREQ      = 0x000055C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 576:
+		moutdwm(ast, 0x1E6E2020, 0x0140);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302A37;
+		param->reg_AC2       = 0xEF56B61E;
+		param->reg_DQSIC     = 0x0000013F;
+		param->reg_MRS       = 0x00101A50;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000057C0;
+		param->madj_max      = 136;
+		param->dll2_finetune_step = 3;
+		break;
+	case 600:
+		moutdwm(ast, 0x1E6E2020, 0x02E1);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x32302A37;
+		param->reg_AC2       = 0xDF56B61F;
+		param->reg_DQSIC     = 0x0000014D;
+		param->reg_MRS       = 0x00101A50;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000058C0;
+		param->madj_max      = 132;
+		param->dll2_finetune_step = 3;
+		break;
+	case 624:
+		moutdwm(ast, 0x1E6E2020, 0x0160);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x32302A37;
+		param->reg_AC2       = 0xEF56B621;
+		param->reg_DQSIC     = 0x0000015A;
+		param->reg_MRS       = 0x02101A50;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000059C0;
+		param->madj_max      = 128;
+		param->dll2_finetune_step = 3;
+		break;
+	} /* switch freq */
+
+	switch (param->dram_chipid) {
+	case AST_DRAM_512Mx16:
+		param->dram_config = 0x130;
+		break;
+	default:
+	case AST_DRAM_1Gx16:
+		param->dram_config = 0x131;
+		break;
+	case AST_DRAM_2Gx16:
+		param->dram_config = 0x132;
+		break;
+	case AST_DRAM_4Gx16:
+		param->dram_config = 0x133;
+		break;
+	} /* switch size */
+
+	switch (param->vram_size) {
+	default:
+	case AST_VIDMEM_SIZE_8M:
+		param->dram_config |= 0x00;
+		break;
+	case AST_VIDMEM_SIZE_16M:
+		param->dram_config |= 0x04;
+		break;
+	case AST_VIDMEM_SIZE_32M:
+		param->dram_config |= 0x08;
+		break;
+	case AST_VIDMEM_SIZE_64M:
+		param->dram_config |= 0x0c;
+		break;
+	}
+
+}
+
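+/*
+ * DDR3 bring-up: reset the MCU, program the timing/PHY registers from the
+ * parameter block, wait for MCLK2X to lock (bumping MADJ/SADJ if it does
+ * not), issue the DDR3 mode-register set sequence, enable ODT as configured
+ * and finally run the DQS calibration in cbr_dll2().
+ */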
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 data, data2;
+
+	moutdwm(ast, 0x1E6E0000, 0xFC600309);
+	moutdwm(ast, 0x1E6E0018, 0x00000100);
+	moutdwm(ast, 0x1E6E0024, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+	udelay(10);
+
+	moutdwm(ast, 0x1E6E0004, param->dram_config);
+	moutdwm(ast, 0x1E6E0008, 0x90040f);
+	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+	moutdwm(ast, 0x1E6E0080, 0x00000000);
+	moutdwm(ast, 0x1E6E0084, 0x00000000);
+	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+	moutdwm(ast, 0x1E6E0018, 0x4040A170);
+	moutdwm(ast, 0x1E6E0018, 0x20402370);
+	moutdwm(ast, 0x1E6E0038, 0x00000000);
+	moutdwm(ast, 0x1E6E0040, 0xFF444444);
+	moutdwm(ast, 0x1E6E0044, 0x22222222);
+	moutdwm(ast, 0x1E6E0048, 0x22222222);
+	moutdwm(ast, 0x1E6E004C, 0x00000002);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+	moutdwm(ast, 0x1E6E0054, 0);
+	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0074, 0x00000000);
+	moutdwm(ast, 0x1E6E0078, 0x00000000);
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	/* Wait MCLK2X lock to MCLK */
+	do {
+		data = mindwm(ast, 0x1E6E001C);
+	} while (!(data & 0x08000000));
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00005C04);
+	udelay(10);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
+	data = mindwm(ast, 0x1E6E001C);
+	data = (data >> 8) & 0xff;
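+	/*
+	 * MCLK2X phase search: step the MADJ value in 0x1E6E0064 by 4,
+	 * derive a matching SADJ byte for 0x1E6E0068, re-lock the PLL and
+	 * recheck the phase status until it is acceptable or madj_max is
+	 * exceeded.
+	 */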
+	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+		if ((data2 & 0xff) > param->madj_max) {
+			break;
+		}
+		moutdwm(ast, 0x1E6E0064, data2);
+		if (data2 & 0x00100000) {
+			data2 = ((data2 & 0xff) >> 3) + 3;
+		} else {
+			data2 = ((data2 & 0xff) >> 2) + 5;
+		}
+		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+		data2 += data & 0xff;
+		data = data | (data2 << 8);
+		moutdwm(ast, 0x1E6E0068, data);
+		udelay(10);
+		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+		udelay(10);
+		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+		moutdwm(ast, 0x1E6E0018, data);
+		data = data | 0x200;
+		moutdwm(ast, 0x1E6E0018, data);
+		do {
+			data = mindwm(ast, 0x1E6E001C);
+		} while (!(data & 0x08000000));
+
+		moutdwm(ast, 0x1E6E0034, 0x00000001);
+		moutdwm(ast, 0x1E6E000C, 0x00005C04);
+		udelay(10);
+		moutdwm(ast, 0x1E6E000C, 0x00000000);
+		moutdwm(ast, 0x1E6E0034, 0x00000000);
+		data = mindwm(ast, 0x1E6E001C);
+		data = (data >> 8) & 0xff;
+	}
+	data = mindwm(ast, 0x1E6E0018) | 0xC00;
+	moutdwm(ast, 0x1E6E0018, data);
+
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00000040);
+	udelay(50);
+	/* Mode Register Setting */
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000005);
+	moutdwm(ast, 0x1E6E0028, 0x00000007);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+	moutdwm(ast, 0x1E6E000C, 0x00005C08);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+	data = 0;
+	if (param->wodt) {
+		data = 0x300;
+	}
+	if (param->rodt) {
+		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+	}
+	moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+	/* Wait DQI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0080);
+	} while (!(data & 0x40000000));
+	/* Wait DQSI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0020);
+	} while (!(data & 0x00000800));
+	/* Calibrate the DQSI delay */
+	cbr_dll2(ast, param);
+
+	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+	/* ECC Memory Initialization */
+#ifdef ECC
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x221);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 trap, trap_AC2, trap_MRS;
+
+	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap_AC2  = (trap << 20) | (trap << 16);
+	trap_AC2 += 0x00110000;
+	trap_MRS  = 0x00000040 | (trap << 4);
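+	/* trap selects one of four presets folded into reg_AC2/reg_MRS below */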
+
+
+	param->reg_MADJ       = 0x00034C4C;
+	param->reg_SADJ       = 0x00001800;
+	param->reg_DRV        = 0x000000F0;
+	param->reg_PERIOD     = param->dram_freq;
+	param->rodt           = 0;
+
+	switch (param->dram_freq) {
+	case 264:
+		moutdwm(ast, 0x1E6E2020, 0x0130);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x11101513;
+		param->reg_AC2       = 0x78117011;
+		param->reg_DQSIC     = 0x00000092;
+		param->reg_MRS       = 0x00000842;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_DRV       = 0x000000F0;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x0000005A;
+		param->reg_FREQ      = 0x00004AC0;
+		param->madj_max      = 138;
+		param->dll2_finetune_step = 3;
+		break;
+	case 336:
+		moutdwm(ast, 0x1E6E2020, 0x0190);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x22202613;
+		param->reg_AC2       = 0xAA009016 | trap_AC2;
+		param->reg_DQSIC     = 0x000000BA;
+		param->reg_MRS       = 0x00000A02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000074;
+		param->reg_FREQ      = 0x00004DC0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 3;
+		break;
+	default:
+	case 396:
+		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		param->wodt          = 1;
+		param->rodt          = 0;
+		param->reg_AC1       = 0x33302714;
+		param->reg_AC2       = 0xCC00B01B | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x00000C02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		case AST_DRAM_512Mx16:
+			param->reg_AC2   = 0xCC00B016 | trap_AC2;
+			break;
+		default:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC00B01B | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC00B02B | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00B03F | trap_AC2;
+			break;
+		}
+
+		break;
+
+	case 408:
+		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		param->wodt          = 1;
+		param->rodt          = 0;
+		param->reg_AC1       = 0x33302714;
+		param->reg_AC2       = 0xCC00B01B | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x00000C02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		case AST_DRAM_512Mx16:
+			param->reg_AC2   = 0xCC00B016 | trap_AC2;
+			break;
+		default:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC00B01B | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC00B02B | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00B03F | trap_AC2;
+			break;
+		}
+
+		break;
+	case 456:
+		moutdwm(ast, 0x1E6E2020, 0x0230);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xCD44B01E;
+		param->reg_DQSIC     = 0x000000FC;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_DRV       = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000097;
+		param->reg_FREQ      = 0x000052C0;
+		param->madj_max      = 88;
+		param->dll2_finetune_step = 3;
+		break;
+	case 504:
+		moutdwm(ast, 0x1E6E2020, 0x0261);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xDE44C022;
+		param->reg_DQSIC     = 0x00000117;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x0000000A;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000A0;
+		param->reg_FREQ      = 0x000054C0;
+		param->madj_max      = 79;
+		param->dll2_finetune_step = 3;
+		break;
+	case 528:
+		moutdwm(ast, 0x1E6E2020, 0x0120);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xEF44D024;
+		param->reg_DQSIC     = 0x00000125;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F9;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000A7;
+		param->reg_FREQ      = 0x000055C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 552:
+		moutdwm(ast, 0x1E6E2020, 0x02A1);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x43402915;
+		param->reg_AC2       = 0xFF44E025;
+		param->reg_DQSIC     = 0x00000132;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x0000000A;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000AD;
+		param->reg_FREQ      = 0x000056C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 576:
+		moutdwm(ast, 0x1E6E2020, 0x0140);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x43402915;
+		param->reg_AC2       = 0xFF44E027;
+		param->reg_DQSIC     = 0x0000013F;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000B3;
+		param->reg_FREQ      = 0x000057C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	}
+
+	switch (param->dram_chipid) {
+	case AST_DRAM_512Mx16:
+		param->dram_config = 0x100;
+		break;
+	default:
+	case AST_DRAM_1Gx16:
+		param->dram_config = 0x121;
+		break;
+	case AST_DRAM_2Gx16:
+		param->dram_config = 0x122;
+		break;
+	case AST_DRAM_4Gx16:
+		param->dram_config = 0x123;
+		break;
+	} /* switch size */
+
+	switch (param->vram_size) {
+	default:
+	case AST_VIDMEM_SIZE_8M:
+		param->dram_config |= 0x00;
+		break;
+	case AST_VIDMEM_SIZE_16M:
+		param->dram_config |= 0x04;
+		break;
+	case AST_VIDMEM_SIZE_32M:
+		param->dram_config |= 0x08;
+		break;
+	case AST_VIDMEM_SIZE_64M:
+		param->dram_config |= 0x0c;
+		break;
+	}
+}
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 data, data2;
+
+	moutdwm(ast, 0x1E6E0000, 0xFC600309);
+	moutdwm(ast, 0x1E6E0018, 0x00000100);
+	moutdwm(ast, 0x1E6E0024, 0x00000000);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+	udelay(10);
+
+	moutdwm(ast, 0x1E6E0004, param->dram_config);
+	moutdwm(ast, 0x1E6E0008, 0x90040f);
+	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+	moutdwm(ast, 0x1E6E0080, 0x00000000);
+	moutdwm(ast, 0x1E6E0084, 0x00000000);
+	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+	moutdwm(ast, 0x1E6E0018, 0x4040A130);
+	moutdwm(ast, 0x1E6E0018, 0x20402330);
+	moutdwm(ast, 0x1E6E0038, 0x00000000);
+	moutdwm(ast, 0x1E6E0040, 0xFF808000);
+	moutdwm(ast, 0x1E6E0044, 0x88848466);
+	moutdwm(ast, 0x1E6E0048, 0x44440008);
+	moutdwm(ast, 0x1E6E004C, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+	moutdwm(ast, 0x1E6E0054, 0);
+	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0074, 0x00000000);
+	moutdwm(ast, 0x1E6E0078, 0x00000000);
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+	/* Wait MCLK2X lock to MCLK */
+	do {
+		data = mindwm(ast, 0x1E6E001C);
+	} while (!(data & 0x08000000));
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00005C04);
+	udelay(10);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
+	data = mindwm(ast, 0x1E6E001C);
+	data = (data >> 8) & 0xff;
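+	/* same MCLK2X phase search as in ddr3_init() above */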
+	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+		if ((data2 & 0xff) > param->madj_max) {
+			break;
+		}
+		moutdwm(ast, 0x1E6E0064, data2);
+		if (data2 & 0x00100000) {
+			data2 = ((data2 & 0xff) >> 3) + 3;
+		} else {
+			data2 = ((data2 & 0xff) >> 2) + 5;
+		}
+		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+		data2 += data & 0xff;
+		data = data | (data2 << 8);
+		moutdwm(ast, 0x1E6E0068, data);
+		udelay(10);
+		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+		udelay(10);
+		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+		moutdwm(ast, 0x1E6E0018, data);
+		data = data | 0x200;
+		moutdwm(ast, 0x1E6E0018, data);
+		do {
+			data = mindwm(ast, 0x1E6E001C);
+		} while (!(data & 0x08000000));
+
+		moutdwm(ast, 0x1E6E0034, 0x00000001);
+		moutdwm(ast, 0x1E6E000C, 0x00005C04);
+		udelay(10);
+		moutdwm(ast, 0x1E6E000C, 0x00000000);
+		moutdwm(ast, 0x1E6E0034, 0x00000000);
+		data = mindwm(ast, 0x1E6E001C);
+		data = (data >> 8) & 0xff;
+	}
+	data = mindwm(ast, 0x1E6E0018) | 0xC00;
+	moutdwm(ast, 0x1E6E0018, data);
+
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	udelay(50);
+	/* Mode Register Setting */
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000005);
+	moutdwm(ast, 0x1E6E0028, 0x00000007);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+	moutdwm(ast, 0x1E6E000C, 0x00005C08);
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+	data = 0;
+	if (param->wodt) {
+		data = 0x500;
+	}
+	if (param->rodt) {
+		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+	}
+	moutdwm(ast, 0x1E6E0034, data | 0x3);
+	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+	/* Wait DQI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0080);
+	} while (!(data & 0x40000000));
+	/* Wait DQSI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0020);
+	} while (!(data & 0x00000800));
+	/* Calibrate the DQSI delay */
+	cbr_dll2(ast, param);
+
+	/* ECC Memory Initialization */
+#ifdef ECC
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x221);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast2300_dram_param param;
+	u32 temp;
+	u8 reg;
+
+	reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	if ((reg & 0x80) == 0) { /* vga only */
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		ast_write32(ast, 0x12000, 0x1688a8a8);
+		do {
+			;
+		} while (ast_read32(ast, 0x12000) != 0x1);
+
+		ast_write32(ast, 0x10000, 0xfc600309);
+		do {
+			;
+		} while (ast_read32(ast, 0x10000) != 0x1);
+
+		/* Slow down CPU/AHB CLK in VGA only mode */
+		temp = ast_read32(ast, 0x12008);
+		temp |= 0x73;
+		ast_write32(ast, 0x12008, temp);
+
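+		/* bit 24 of the register read above selects DDR2 instead of DDR3 */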
+		param.dram_type = AST_DDR3;
+		if (temp & 0x01000000)
+			param.dram_type = AST_DDR2;
+		param.dram_chipid = ast->dram_type;
+		param.dram_freq = ast->mclk;
+		param.vram_size = ast->vram_size;
+
+		if (param.dram_type == AST_DDR3) {
+			get_ddr3_info(ast, &param);
+			ddr3_init(ast, &param);
+		} else {
+			get_ddr2_info(ast, &param);
+			ddr2_init(ast, &param);
+		}
+
+		temp = mindwm(ast, 0x1e6e2040);
+		moutdwm(ast, 0x1e6e2040, temp | 0x40);
+	}
+
+	/* wait ready */
+	do {
+		reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	} while ((reg & 0x40) == 0);
+}
+

+ 265 - 0
drivers/gpu/drm/ast/ast_tables.h

@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  The authors makes no representations
+ * about the suitability of this software for any purpose.  It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex		0
+#define EGAModeIndex		1
+#define VGAModeIndex		2
+#define HiCModeIndex		3
+#define TrueCModeIndex		4
+
+#define Charx8Dot               0x00000001
+#define HalfDCLK                0x00000002
+#define DoubleScanMode          0x00000004
+#define LineCompareOff          0x00000008
+#define SyncPP                  0x00000000
+#define SyncPN                  0x00000040
+#define SyncNP                  0x00000080
+#define SyncNN                  0x000000C0
+#define HBorder                 0x00000020
+#define VBorder                 0x00000010
+#define WideScreenMode		0x00000100
+
+
+/* DCLK Index */
+#define VCLK25_175     		0x00
+#define VCLK28_322     		0x01
+#define VCLK31_5       		0x02
+#define VCLK36         		0x03
+#define VCLK40         		0x04
+#define VCLK49_5       		0x05
+#define VCLK50         		0x06
+#define VCLK56_25      		0x07
+#define VCLK65		 	0x08
+#define VCLK75	        	0x09
+#define VCLK78_75      		0x0A
+#define VCLK94_5       		0x0B
+#define VCLK108        		0x0C
+#define VCLK135        		0x0D
+#define VCLK157_5      		0x0E
+#define VCLK162        		0x0F
+/* #define VCLK193_25     		0x10 */
+#define VCLK154     		0x10
+#define VCLK83_5    		0x11
+#define VCLK106_5   		0x12
+#define VCLK146_25  		0x13
+#define VCLK148_5   		0x14
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+	{0x2C, 0xE7, 0x03},					/* 00: VCLK25_175	*/
+	{0x95, 0x62, 0x03},				        /* 01: VCLK28_322	*/
+	{0x67, 0x63, 0x01},				        /* 02: VCLK31_5         */
+	{0x76, 0x63, 0x01},				        /* 03: VCLK36         	*/
+	{0xEE, 0x67, 0x01},				        /* 04: VCLK40          	*/
+	{0x82, 0x62, 0x01}, 			        /* 05: VCLK49_5        	*/
+	{0xC6, 0x64, 0x01},                        	        /* 06: VCLK50          	*/
+	{0x94, 0x62, 0x01},                        	        /* 07: VCLK56_25       	*/
+	{0x80, 0x64, 0x00},                        	        /* 08: VCLK65		*/
+	{0x7B, 0x63, 0x00},                        	        /* 09: VCLK75	        */
+	{0x67, 0x62, 0x00},				        /* 0A: VCLK78_75       	*/
+	{0x7C, 0x62, 0x00},                        	        /* 0B: VCLK94_5        	*/
+	{0x8E, 0x62, 0x00},                        	        /* 0C: VCLK108         	*/
+	{0x85, 0x24, 0x00},                        	        /* 0D: VCLK135         	*/
+	{0x67, 0x22, 0x00},                        	        /* 0E: VCLK157_5       	*/
+	{0x6A, 0x22, 0x00},				        /* 0F: VCLK162         	*/
+	{0x4d, 0x4c, 0x80},				        /* 10: VCLK154      	*/
+	{0xa7, 0x78, 0x80},					/* 11: VCLK83.5         */
+	{0x28, 0x49, 0x80},					/* 12: VCLK106.5        */
+	{0x37, 0x49, 0x80},					/* 13: VCLK146.25       */
+	{0x1f, 0x45, 0x80},					/* 14: VCLK148.5        */
+};
+
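+/* each entry: VGA misc value, then the SEQ, CRTC, AR and GR register sets */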
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+	/* MD_2_3_400 */
+	{
+		0x67,
+		{0x00,0x03,0x00,0x02},
+		{0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+		 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+		 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+		 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+		 0x0c,0x00,0x0f,0x08},
+		{0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+		 0xff}
+	},
+	/* Mode12/ExtEGATable */
+	{
+		0xe3,
+		{0x01,0x0f,0x00,0x06},
+		{0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+		 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+		 0x01,0x00,0x0f,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtVGATable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtHiCTable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtTrueCTable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+};
+
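+/*
+ * Each entry: htotal, hdisplay, hfront porch, hsync, vtotal, vdisplay,
+ * vfront porch, vsync, DCLK index, flags, refresh rate, refresh rate
+ * index and mode ID.
+ */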
+static struct ast_vbios_enhtable res_640x480[] = {
+	{ 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175,	/* 60Hz */
+	  (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+	{ 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5,	/* 72Hz */
+	  (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E  },
+	{ 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5,	/* 75Hz */
+	  (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+	{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36,		/* 85Hz */
+	  (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+	{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36,		/* end */
+	  (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+	{1024, 800, 24, 72, 625, 600, 1, 2, VCLK36,		/* 56Hz */
+	 (SyncPP | Charx8Dot), 56, 1, 0x30 },
+	{1056, 800, 40, 128, 628, 600, 1, 4, VCLK40,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 2, 0x30 },
+	{1040, 800, 56, 120, 666, 600, 37, 6, VCLK50,	/* 72Hz */
+	 (SyncPP | Charx8Dot), 72, 3, 0x30 },
+	{1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 4, 0x30 },
+	{1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 84, 5, 0x30 },
+	{1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+	{1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65,	/* 60Hz */
+	 (SyncNN | Charx8Dot), 60, 1, 0x31 },
+	{1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75,	/* 70Hz */
+	 (SyncNN | Charx8Dot), 70, 2, 0x31 },
+	{1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 3, 0x31 },
+	{1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 84, 4, 0x31 },
+	{1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+	{1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 1, 0x32 },
+	{1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 2, 0x32 },
+	{1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 85, 3, 0x32 },
+	{1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+	{2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 1, 0x33 },
+	{2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+	{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154,	/* 60Hz */
+	 (SyncNP | Charx8Dot), 60, 1, 0x34 },
+	{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154,	/* 60Hz */
+	 (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+	{1680, 1280, 72,128,  831,  800, 3, 6, VCLK83_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+	{1680, 1280, 72,128,  831,  800, 3, 6, VCLK83_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+	{1904, 1440, 80,152,  934,  900, 3, 6, VCLK106_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+	{1904, 1440, 80,152,  934,  900, 3, 6, VCLK106_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+	{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+	{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+	{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5,	/* 60Hz */
+	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+	{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5,	/* 60Hz */
+	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif

+ 453 - 0
drivers/gpu/drm/ast/ast_ttm.c

@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &ast->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &ast_ttm_mem_global_init;
+	global_ref->release = &ast_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	ast->ttm.bo_global_ref.mem_glob =
+		ast->ttm.mem_global_ref.object;
+	global_ref = &ast->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&ast->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+	if (ast->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&ast->ttm.mem_global_ref);
+	ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct ast_bo *bo;
+
+	bo = container_of(tbo, struct ast_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &ast_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct ast_bo *astbo = ast_bo(bo);
+
+	if (!ast_ttm_bo_is_ast_bo(bo))
+		return;
+
+	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+	*pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct ast_private *ast = ast_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	return r;
+}
+
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+	.destroy = &ast_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &ast_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+	.ttm_tt_create = ast_ttm_tt_create,
+	.ttm_tt_populate = ast_ttm_tt_populate,
+	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+	.init_mem_type = ast_bo_init_mem_type,
+	.evict_flags = ast_bo_evict_flags,
+	.move = ast_bo_move,
+	.verify_access = ast_bo_verify_access,
+	.io_mem_reserve = &ast_ttm_io_mem_reserve,
+	.io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+	int ret;
+	struct drm_device *dev = ast->dev;
+	struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+	ret = ast_ttm_global_init(ast);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&ast->ttm.bdev,
+				 ast->ttm.bo_global_ref.ref.object,
+				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     ast->vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+	struct drm_device *dev = ast->dev;
+	ttm_bo_device_release(&ast->ttm.bdev);
+
+	ast_ttm_global_release(ast);
+
+	if (ast->fb_mtrr >= 0) {
+		drm_mtrr_del(ast->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		ast->fb_mtrr = -1;
+	}
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct ast_bo **pastbo)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_bo *astbo;
+	size_t acc_size;
+	int ret;
+
+	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+	if (!astbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &astbo->gem, size);
+	if (ret) {
+		kfree(astbo);
+		return ret;
+	}
+
+	astbo->gem.driver_private = NULL;
+	astbo->bo.bdev = &ast->ttm.bdev;
+
+	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+				       sizeof(struct ast_bo));
+
+	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+			  ttm_bo_type_device, &astbo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  ast_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pastbo = astbo;
+	return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = ast_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	ast_ttm_placement(bo, pl_flag);
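+	/* mark every allowed placement NO_EVICT and revalidate to pin the bo */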
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = ast_bo_gpu_offset(bo);
+	return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to VRAM failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct ast_private *ast;
+
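+	/* offsets below DRM_FILE_PAGE_OFFSET are legacy DRM maps, not TTM objects */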
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	ast = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}

+ 12 - 0
drivers/gpu/drm/cirrus/Kconfig

@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+	tristate "Cirrus driver for QEMU emulated device"
+	depends on DRM && PCI && EXPERIMENTAL
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	 This is a KMS driver for the Cirrus device emulated by QEMU.
+	 It is *NOT* intended for real Cirrus hardware. It requires
+	 the modesetting userspace X.org driver.

+ 5 - 0
drivers/gpu/drm/cirrus/Makefile

@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y  := cirrus_main.o cirrus_mode.o \
+	cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o

+ 108 - 0
drivers/gpu/drm/cirrus/cirrus_drv.c

@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. It binds the driver to the drm core,
+ * which then performs further device association and calls our graphics
+ * init functions.
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
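+	/* subsystem 0x1af4:0x1100 identifies the QEMU-emulated device */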
+	{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+	  0, 0 },
+	{0,}
+};
+
+static int __devinit
+cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static const struct file_operations cirrus_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = cirrus_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+};
+static struct drm_driver driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+	.load = cirrus_driver_load,
+	.unload = cirrus_driver_unload,
+	.fops = &cirrus_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+	.gem_init_object = cirrus_gem_init_object,
+	.gem_free_object = cirrus_gem_free_object,
+	.dumb_create = cirrus_dumb_create,
+	.dumb_map_offset = cirrus_dumb_mmap_offset,
+	.dumb_destroy = cirrus_dumb_destroy,
+};
+
+static struct pci_driver cirrus_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = cirrus_pci_probe,
+	.remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && cirrus_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (cirrus_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+	drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");

+ 246 - 0
drivers/gpu/drm/cirrus/cirrus_drv.h

@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#define DRIVER_AUTHOR		"Matthew Garrett"
+
+#define DRIVER_NAME		"cirrus"
+#define DRIVER_DESC		"qemu Cirrus emulation"
+#define DRIVER_DATE		"20110418"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v)					\
+	do {							\
+		WREG8(SEQ_INDEX, reg);				\
+		WREG8(SEQ_DATA, v);				\
+	} while (0)						\
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v)					\
+	do {							\
+		WREG8(CRT_INDEX, reg);				\
+		WREG8(CRT_DATA, v);				\
+	} while (0)						\
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v)					\
+	do {							\
+		WREG8(GFX_INDEX, reg);				\
+		WREG8(GFX_DATA, v);				\
+	} while (0)						\
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC.
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v)						\
+	do {							\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		WREG8(VGA_DAC_MASK, v);					\
+	} while (0)						\
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+	struct drm_crtc			base;
+	u8				lut_r[256], lut_g[256], lut_b[256];
+	int				last_dpms;
+	bool				enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+	bool				mode_config_initialized;
+	struct cirrus_crtc		*crtc;
+	/* pointer to fbdev info structure */
+	struct cirrus_fbdev		*gfbdev;
+};
+
+struct cirrus_encoder {
+	struct drm_encoder		base;
+	int				last_dpms;
+};
+
+struct cirrus_connector {
+	struct drm_connector		base;
+};
+
+struct cirrus_framebuffer {
+	struct drm_framebuffer		base;
+	struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+	resource_size_t			vram_size;
+	resource_size_t			vram_base;
+};
+
+struct cirrus_device {
+	struct drm_device		*dev;
+	unsigned long			flags;
+
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	void __iomem			*rmmio;
+
+	struct cirrus_mc			mc;
+	struct cirrus_mode_info		mode_info;
+
+	int				num_crtc;
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+	} ttm;
+
+};
+
+
+struct cirrus_fbdev {
+	struct drm_fb_helper helper;
+	struct cirrus_framebuffer gfb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+};
+
+struct cirrus_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct cirrus_bo, bo);
+}
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+				/* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			     u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno);
+
+
+				/* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+		      struct drm_device *ddev,
+		      struct pci_dev *pdev,
+		      uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle,
+			    uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		      struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		       struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+			uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+			   struct cirrus_framebuffer *gfb,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj);
+
+				/* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+				/* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+				/* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+				/* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+		     uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif				/* __CIRRUS_DRV_H__ */

+ 307 - 0
drivers/gpu/drm/cirrus/cirrus_fbdev.c

@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct cirrus_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+
+	obj = afbdev->gfb.obj;
+	bo = gem_to_cirrus_bo(obj);
+
+	ret = cirrus_bo_reserve(bo, true);
+	if (ret) {
+		DRM_ERROR("failed to reserve fb bo\n");
+		return;
+	}
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			cirrus_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
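+	/* copy each dirty scanline from the system RAM shadow into the VRAM bo */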
+	for (i = y; i < y + height; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_fillrect(info, rect);
+	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_copyarea(info, area);
+	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_imageblit(info, image);
+	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cirrus_fillrect,
+	.fb_copyarea = cirrus_copyarea,
+	.fb_imageblit = cirrus_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	if (bpp > 24)
+		return -EINVAL;
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = cirrus_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = gfbdev->helper.dev;
+	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	void *sysram;
+	struct drm_gem_object *gobj = NULL;
+	struct cirrus_bo *bo = NULL;
+	int size, ret;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+
+	bo = gem_to_cirrus_bo(gobj);
+
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL)
+		return -ENOMEM;
+
+	info->par = gfbdev;
+
+	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	gfbdev->sysram = sysram;
+	gfbdev->size = size;
+
+	fb = &gfbdev->gfb.base;
+	if (!fb) {
+		DRM_INFO("fb is NULL\n");
+		return -EINVAL;
+	}
+
+	/* setup helper */
+	gfbdev->helper.fb = fb;
+	gfbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "cirrusdrmfb");
+
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &cirrusfb_ops;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+	DRM_INFO("fb depth is %d\n", fb->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	return 0;
+out_iounmap:
+	return ret;
+}
+
+static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
+					   struct drm_fb_helper_surface_size
+					   *sizes)
+{
+	struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = cirrusfb_create(gfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+				struct cirrus_fbdev *gfbdev)
+{
+	struct fb_info *info;
+	struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+	if (gfbdev->helper.fbdev) {
+		info = gfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (gfb->obj) {
+		drm_gem_object_unreference_unlocked(gfb->obj);
+		gfb->obj = NULL;
+	}
+
+	vfree(gfbdev->sysram);
+	drm_fb_helper_fini(&gfbdev->helper);
+	drm_framebuffer_cleanup(&gfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+	.gamma_set = cirrus_crtc_fb_gamma_set,
+	.gamma_get = cirrus_crtc_fb_gamma_get,
+	.fb_probe = cirrus_fb_find_or_create_single,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+	struct cirrus_fbdev *gfbdev;
+	int ret;
+	int bpp_sel = 24;
+
+	/*bpp_sel = 8;*/
+	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+	if (!gfbdev)
+		return -ENOMEM;
+
+	cdev->mode_info.gfbdev = gfbdev;
+	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+	if (ret) {
+		kfree(gfbdev);
+		return ret;
+	}
+	drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+	drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+	return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+	if (!cdev->mode_info.gfbdev)
+		return;
+
+	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+	kfree(cdev->mode_info.gfbdev);
+	cdev->mode_info.gfbdev = NULL;
+}

+ 335 - 0
drivers/gpu/drm/cirrus/cirrus_main.c

@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+	if (cirrus_fb->obj)
+		drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						 struct drm_file *file_priv,
+						 unsigned int *handle)
+{
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+	.destroy = cirrus_user_framebuffer_destroy,
+	.create_handle = cirrus_user_framebuffer_create_handle,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+			    struct cirrus_framebuffer *gfb,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj)
+{
+	int ret;
+
+	ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+	gfb->obj = obj;
+	return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *filp,
+			       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct cirrus_framebuffer *cirrus_fb;
+	int ret;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	/* cirrus can't handle > 24bpp framebuffers at all */
+	if (bpp > 24)
+		return ERR_PTR(-EINVAL);
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+	if (!cirrus_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(cirrus_fb);
+		return ERR_PTR(ret);
+	}
+	return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+	.fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+	iounmap(cdev->rmmio);
+	cdev->rmmio = NULL;
+	if (cdev->mc.vram_base)
+		release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+	/* BAR 0 is VRAM */
+	cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+	/* We have 4MB of VRAM */
+	cdev->mc.vram_size = 4 * 1024 * 1024;
+
+	if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+				"cirrusdrmfb_vram")) {
+		DRM_ERROR("can't reserve VRAM\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev, uint32_t flags)
+{
+	int ret;
+
+	cdev->dev = ddev;
+	cdev->flags = flags;
+
+	/* Hardcode the number of CRTCs to 1 */
+	cdev->num_crtc = 1;
+
+	/* BAR 0 is the framebuffer, BAR 1 contains registers */
+	cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+	cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+	if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+				"cirrusdrmfb_mmio")) {
+		DRM_ERROR("can't reserve mmio registers\n");
+		return -ENOMEM;
+	}
+
+	cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+	if (cdev->rmmio == NULL) {
+		release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+		return -ENOMEM;
+	}
+
+	ret = cirrus_vram_init(cdev);
+	if (ret) {
+		release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+		return ret;
+	}
+
+	return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+	release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+	cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct cirrus_device *cdev;
+	int r;
+
+	cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+	if (cdev == NULL)
+		return -ENOMEM;
+	dev->dev_private = (void *)cdev;
+
+	r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+		goto out;
+	}
+
+	r = cirrus_mm_init(cdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+
+	r = cirrus_modeset_init(cdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+	dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+	if (r)
+		cirrus_driver_unload(dev);
+	return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+	struct cirrus_device *cdev = dev->dev_private;
+
+	if (cdev == NULL)
+		return 0;
+	cirrus_modeset_fini(cdev);
+	cirrus_mm_fini(cdev);
+	cirrus_device_fini(cdev);
+	kfree(cdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct cirrus_bo *cirrusbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &cirrusbo->gem;
+	return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = cirrus_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int cirrus_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+	struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+	if (!cirrus_bo)
+		return;
+	cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct cirrus_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_cirrus_bo(obj);
+	*offset = cirrus_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}

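cirrus_dumb_create() and cirrus_dumb_mmap_offset() back the generic dumb-buffer ioctls registered in drm_drv.c, which is how a KMS client gets a CPU-mappable buffer out of this driver. The userspace sketch below walks that path; it assumes the card is /dev/dri/card0, that the caller is (or becomes) DRM master since the dumb ioctls are flagged DRM_MASTER in the ioctl table, that the exported uapi headers are on the include path, and a 64-bit build (or _FILE_OFFSET_BITS=64) for the mmap offset.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *fb;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = 768;
	create.bpp = 24;	/* the driver rejects framebuffers above 24bpp */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
		perror("DRM_IOCTL_MODE_CREATE_DUMB");
		return 1;
	}

	/* ask for the fake mmap offset of the backing TTM object */
	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map)) {
		perror("DRM_IOCTL_MODE_MAP_DUMB");
		return 1;
	}

	/* the mapping is serviced by cirrus_mmap()/ttm_bo_mmap() in the driver */
	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(fb, 0, create.size);	/* clear the new buffer */

	printf("handle %u, pitch %u, size %llu\n", create.handle, create.pitch,
	       (unsigned long long)create.size);

	munmap(fb, create.size);
	close(fd);
	return 0;
}

To actually scan the buffer out, a client would still wrap it in a framebuffer object and do a setcrtc, which is where the 24bpp limit in cirrus_user_framebuffer_create() is enforced.
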
+ 629 - 0
drivers/gpu/drm/cirrus/cirrus_mode.c

@@ -0,0 +1,629 @@
+
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		/* VGA registers */
+		WREG8(PALETTE_INDEX, i);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+	}
+}
+
+/*
+ * The DRM core requires DPMS functions. We implement them by toggling
+ * the screen-blank bit in SR01 and the hsync/vsync power-down bits in GR0E.
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	u8 sr01, gr0e;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		sr01 = 0x00;
+		gr0e = 0x00;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		sr01 = 0x20;
+		gr0e = 0x02;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		sr01 = 0x20;
+		gr0e = 0x04;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		sr01 = 0x20;
+		gr0e = 0x06;
+		break;
+	default:
+		return;
+	}
+
+	WREG8(SEQ_INDEX, 0x1);
+	sr01 |= RREG8(SEQ_DATA) & ~0x20;
+	WREG_SEQ(0x1, sr01);
+
+	WREG8(GFX_INDEX, 0xe);
+	gr0e |= RREG8(GFX_DATA) & ~0x06;
+	WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+	struct cirrus_device *cdev = crtc->dev->dev_private;
+	u32 addr;
+	u8 tmp;
+
+	addr = offset >> 2;
+	WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+	WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+	WREG8(CRT_INDEX, 0x1b);
+	tmp = RREG8(CRT_DATA);
+	tmp &= 0xf2;
+	tmp |= (addr >> 16) & 0x01;
+	tmp |= (addr >> 15) & 0x0c;
+	WREG_CRT(0x1b, tmp);
+	WREG8(CRT_INDEX, 0x1d);
+	tmp = RREG8(CRT_DATA);
+	tmp &= 0x7f;
+	tmp |= (addr >> 12) & 0x80;
+	WREG_CRT(0x1d, tmp);
+}
+
+/* cirrus is different - VRAM is tiny (4MB), so force-move the old framebuffer to system RAM first */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct cirrus_device *cdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct cirrus_framebuffer *cirrus_fb;
+	struct cirrus_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		cirrus_fb = to_cirrus_framebuffer(fb);
+		obj = cirrus_fb->obj;
+		bo = gem_to_cirrus_bo(obj);
+		ret = cirrus_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		cirrus_bo_push_sysram(bo);
+		cirrus_bo_unreserve(bo);
+	}
+
+	cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+	obj = cirrus_fb->obj;
+	bo = gem_to_cirrus_bo(obj);
+
+	ret = cirrus_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		cirrus_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+		/* if we are setting the fbdev console framebuffer, kmap it */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+	}
+	cirrus_bo_unreserve(bo);
+
+	cirrus_set_start_address(crtc, (u32)gpu_addr);
+	return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	int hsyncstart, hsyncend, htotal, hdispend;
+	int vtotal, vdispend;
+	int tmp;
+	int sr07 = 0, hdr = 0;
+
+	htotal = mode->htotal / 8;
+	hsyncend = mode->hsync_end / 8;
+	hsyncstart = mode->hsync_start / 8;
+	hdispend = mode->hdisplay / 8;
+
+	vtotal = mode->vtotal;
+	vdispend = mode->vdisplay;
+
+	vdispend -= 1;
+	vtotal -= 2;
+
+	htotal -= 5;
+	hdispend -= 1;
+	hsyncstart += 1;
+	hsyncend += 1;
+
+	WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+	WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+	WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+	WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+	WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+	WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+	WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+	tmp = 0x40;
+	if ((vdispend + 1) & 512)
+		tmp |= 0x20;
+	WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+	/*
+	 * Overflow bits for values that don't fit in the standard registers
+	 */
+	tmp = 16;
+	if (vtotal & 256)
+		tmp |= 1;
+	if (vdispend & 256)
+		tmp |= 2;
+	if ((vdispend + 1) & 256)
+		tmp |= 8;
+	if (vtotal & 512)
+		tmp |= 32;
+	if (vdispend & 512)
+		tmp |= 64;
+	WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+	tmp = 0;
+
+	/* More overflow bits */
+
+	if ((htotal + 5) & 64)
+		tmp |= 16;
+	if ((htotal + 5) & 128)
+		tmp |= 32;
+	if (vtotal & 256)
+		tmp |= 64;
+	if (vtotal & 512)
+		tmp |= 128;
+
+	WREG_CRT(CL_CRT1A, tmp);
+
+	/* Disable Hercules/CGA compatibility */
+	WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+	WREG8(SEQ_INDEX, 0x7);
+	sr07 = RREG8(SEQ_DATA);
+	sr07 &= 0xe0;
+	hdr = 0;
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		sr07 |= 0x11;
+		break;
+	case 16:
+		sr07 |= 0xc1;
+		hdr = 0xc0;
+		break;
+	case 24:
+		sr07 |= 0x15;
+		hdr = 0xc5;
+		break;
+	case 32:
+		sr07 |= 0x19;
+		hdr = 0xc5;
+		break;
+	default:
+		return -1;
+	}
+
+	WREG_SEQ(0x7, sr07);
+
+	/* Program the pitch */
+	tmp = crtc->fb->pitches[0] / 8;
+	WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+	/* Enable extended blanking and pitch bits, and enable full memory */
+	tmp = 0x22;
+	tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+	tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+	WREG_CRT(0x1b, tmp);
+
+	/* Enable high-colour modes */
+	WREG_GFX(VGA_GFX_MODE, 0x40);
+
+	/* And set graphics mode */
+	WREG_GFX(VGA_GFX_MISC, 0x01);
+
+	WREG_HDR(hdr);
+	cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	return 0;
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the display via DPMS while it is being reprogrammed, so that
+ * intermediate stages aren't visible, but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+	int i;
+
+	if (size != CIRRUS_LUT_SIZE)
+		return;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		cirrus_crtc->lut_r[i] = red[i];
+		cirrus_crtc->lut_g[i] = green[i];
+		cirrus_crtc->lut_b[i] = blue[i];
+	}
+	cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+	.gamma_set = cirrus_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+	.dpms = cirrus_crtc_dpms,
+	.mode_fixup = cirrus_crtc_mode_fixup,
+	.mode_set = cirrus_crtc_mode_set,
+	.mode_set_base = cirrus_crtc_mode_set_base,
+	.prepare = cirrus_crtc_prepare,
+	.commit = cirrus_crtc_commit,
+	.load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+	struct cirrus_device *cdev = dev->dev_private;
+	struct cirrus_crtc *cirrus_crtc;
+	int i;
+
+	cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+			      (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+			      GFP_KERNEL);
+
+	if (cirrus_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+	cdev->mode_info.crtc = cirrus_crtc;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		cirrus_crtc->lut_r[i] = i;
+		cirrus_crtc->lut_g[i] = i;
+		cirrus_crtc->lut_b[i] = i;
+	}
+
+	drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	cirrus_crtc->lut_r[regno] = red;
+	cirrus_crtc->lut_g[regno] = green;
+	cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	*red = cirrus_crtc->lut_r[regno];
+	*green = cirrus_crtc->lut_g[regno];
+	*blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+	return;
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+	.dpms = cirrus_encoder_dpms,
+	.mode_fixup = cirrus_encoder_mode_fixup,
+	.mode_set = cirrus_encoder_mode_set,
+	.prepare = cirrus_encoder_prepare,
+	.commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+	.destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct cirrus_encoder *cirrus_encoder;
+
+	cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+	if (!cirrus_encoder)
+		return NULL;
+
+	encoder = &cirrus_encoder->base;
+	encoder->possible_crtcs = 0x1;
+
+	drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+	return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+	/* Just add a static list of modes */
+	drm_add_modes_noedid(connector, 640, 480);
+	drm_add_modes_noedid(connector, 800, 600);
+	drm_add_modes_noedid(connector, 1024, 768);
+	drm_add_modes_noedid(connector, 1280, 1024);
+
+	return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	/* Any mode we've added is valid */
+	return MODE_OK;
+}
+
+struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+						  *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj =
+		    drm_mode_object_find(connector->dev, enc_id,
+					 DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+						   *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+	.get_modes = cirrus_vga_get_modes,
+	.mode_valid = cirrus_vga_mode_valid,
+	.best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cirrus_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct cirrus_connector *cirrus_connector;
+
+	cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+	if (!cirrus_connector)
+		return NULL;
+
+	connector = &cirrus_connector->base;
+
+	drm_connector_init(dev, connector,
+			   &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+	return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	drm_mode_config_init(cdev->dev);
+	cdev->mode_info.mode_config_initialized = true;
+
+	cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+	cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+	cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+	cdev->dev->mode_config.preferred_depth = 24;
+	/* don't prefer a shadow on virt GPU */
+	cdev->dev->mode_config.prefer_shadow = 0;
+
+	cirrus_crtc_init(cdev->dev);
+
+	encoder = cirrus_encoder_init(cdev->dev);
+	if (!encoder) {
+		DRM_ERROR("cirrus_encoder_init failed\n");
+		return -1;
+	}
+
+	connector = cirrus_vga_init(cdev->dev);
+	if (!connector) {
+		DRM_ERROR("cirrus_vga_init failed\n");
+		return -1;
+	}
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ret = cirrus_fbdev_init(cdev);
+	if (ret) {
+		DRM_ERROR("cirrus_fbdev_init failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+	cirrus_fbdev_fini(cdev);
+
+	if (cdev->mode_info.mode_config_initialized) {
+		drm_mode_config_cleanup(cdev->dev);
+		cdev->mode_info.mode_config_initialized = false;
+	}
+}

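The register arithmetic in cirrus_crtc_mode_set() above is easy to misread: horizontal timings are converted to character clocks (divided by 8) and then biased the way the VGA CRT controller expects, while vertical timings keep their scanline units and only get biased. The small host-side program below reproduces that arithmetic for a standard 1024x768 mode line (hdisplay 1024, hsync 1048-1184, htotal 1344, vdisplay 768, vtotal 806 - used purely as an illustrative input) and prints the values that would be written via WREG_CRT(); the remaining bit-twiddling in the function packs the high bits into VGA_CRTC_OVERFLOW and CL_CRT1A.

#include <stdio.h>

int main(void)
{
	/* illustrative input: a standard 1024x768@60 mode line */
	int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;
	int vdisplay = 768, vtotal = 806;

	/* horizontal values are programmed in character clocks (8 pixels) */
	int crt_htotal     = htotal / 8 - 5;
	int crt_hdispend   = hdisplay / 8 - 1;
	int crt_hsyncstart = hsync_start / 8 + 1;
	int crt_hsyncend   = hsync_end / 8 + 1;

	/* vertical values stay in scanlines, minus the usual VGA bias */
	int crt_vtotal   = vtotal - 2;
	int crt_vdispend = vdisplay - 1;

	printf("VGA_CRTC_H_TOTAL      = 0x%02x\n", crt_htotal & 0xff);
	printf("VGA_CRTC_H_DISP       = 0x%02x\n", crt_hdispend & 0xff);
	printf("VGA_CRTC_H_SYNC_START = 0x%02x\n", crt_hsyncstart & 0xff);
	printf("VGA_CRTC_H_SYNC_END   = 0x%02x\n", crt_hsyncend & 0xff);
	printf("VGA_CRTC_V_TOTAL      = 0x%02x (bits 8/9 go to the overflow regs)\n",
	       crt_vtotal & 0xff);
	printf("VGA_CRTC_V_DISP_END   = 0x%02x (bits 8/9 go to the overflow regs)\n",
	       crt_vdispend & 0xff);
	return 0;
}
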
+ 453 - 0
drivers/gpu/drm/cirrus/cirrus_ttm.c

@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &cirrus->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &cirrus_ttm_mem_global_init;
+	global_ref->release = &cirrus_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	cirrus->ttm.bo_global_ref.mem_glob =
+		cirrus->ttm.mem_global_ref.object;
+	global_ref = &cirrus->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+	if (cirrus->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+	cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct cirrus_bo *bo;
+
+	bo = container_of(tbo, struct cirrus_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &cirrus_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+	if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+		return;
+
+	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+	*pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+	.destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &cirrus_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+	.ttm_tt_create = cirrus_ttm_tt_create,
+	.ttm_tt_populate = cirrus_ttm_tt_populate,
+	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+	.init_mem_type = cirrus_bo_init_mem_type,
+	.evict_flags = cirrus_bo_evict_flags,
+	.move = cirrus_bo_move,
+	.verify_access = cirrus_bo_verify_access,
+	.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+	.io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+	int ret;
+	struct drm_device *dev = cirrus->dev;
+	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+	ret = cirrus_ttm_global_init(cirrus);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+				 cirrus->ttm.bo_global_ref.ref.object,
+				 &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     cirrus->mc.vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+	struct drm_device *dev = cirrus->dev;
+	ttm_bo_device_release(&cirrus->ttm.bdev);
+
+	cirrus_ttm_global_release(cirrus);
+
+	if (cirrus->fb_mtrr >= 0) {
+		drm_mtrr_del(cirrus->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		cirrus->fb_mtrr = -1;
+	}
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+	struct cirrus_device *cirrus = dev->dev_private;
+	struct cirrus_bo *cirrusbo;
+	size_t acc_size;
+	int ret;
+
+	cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+	if (!cirrusbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+	if (ret) {
+		kfree(cirrusbo);
+		return ret;
+	}
+
+	cirrusbo->gem.driver_private = NULL;
+	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+				       sizeof(struct cirrus_bo));
+
+	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+			  ttm_bo_type_device, &cirrusbo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  cirrus_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pcirrusbo = cirrusbo;
+	return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = cirrus_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	cirrus_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = cirrus_bo_gpu_offset(bo);
+	return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to system ram failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct cirrus_device *cirrus;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	cirrus = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}

+ 210 - 92
drivers/gpu/drm/drm_crtc.c

@@ -384,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 	if (ret)
 		goto out;
 
+	crtc->base.properties = &crtc->properties;
+
 	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
 	dev->mode_config.num_crtc++;
 
@@ -483,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
 	if (ret)
 		goto out;
 
+	connector->base.properties = &connector->properties;
 	connector->dev = dev;
 	connector->funcs = funcs;
 	connector->connector_type = connector_type;
@@ -1424,11 +1427,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	}
 	connector = obj_to_connector(obj);
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] != 0) {
-			props_count++;
-		}
-	}
+	props_count = connector->properties.count;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] != 0) {
@@ -1481,21 +1480,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
 		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
-		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-			if (connector->property_ids[i] != 0) {
-				if (put_user(connector->property_ids[i],
-					     prop_ptr + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
+		for (i = 0; i < connector->properties.count; i++) {
+			if (put_user(connector->properties.ids[i],
+				     prop_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
 
-				if (put_user(connector->property_values[i],
-					     prop_values + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+			if (put_user(connector->properties.values[i],
+				     prop_values + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
+			copied++;
 		}
 	}
 	out_resp->count_props = props_count;
@@ -2819,60 +2816,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
 			       struct drm_property *property, uint64_t init_val)
 {
-	int i;
-
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == 0) {
-			connector->property_ids[i] = property->base.id;
-			connector->property_values[i] = init_val;
-			break;
-		}
-	}
-
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	drm_object_attach_property(&connector->base, property, init_val);
 }
 EXPORT_SYMBOL(drm_connector_attach_property);
 
 int drm_connector_property_set_value(struct drm_connector *connector,
 				  struct drm_property *property, uint64_t value)
+{
+	return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+				  struct drm_property *property, uint64_t *val)
+{
+	return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
+{
+	int count = obj->properties->count;
+
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		WARN(1, "Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
+	}
+
+	obj->properties->ids[count] = property->base.id;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
 {
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			connector->property_values[i] = value;
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			obj->properties->values[i] = val;
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
 
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
 				  struct drm_property *property, uint64_t *val)
 {
 	int i;
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == property->base.id) {
-			*val = connector->property_values[i];
-			break;
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			*val = obj->properties->values[i];
+			return 0;
 		}
 	}
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY)
-		return -EINVAL;
-	return 0;
+	return -EINVAL;
 }
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
 
 int drm_mode_getproperty_ioctl(struct drm_device *dev,
 			       void *data, struct drm_file *file_priv)
@@ -3074,75 +3089,178 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
+static bool drm_property_change_is_valid(struct drm_property *property,
+					 __u64 value)
+{
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+	if (property->flags & DRM_MODE_PROP_RANGE) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else {
+		int i;
+		for (i = 0; i < property->num_values; i++)
+			if (property->values[i] == value)
+				return true;
+		return false;
+	}
+}
+
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 				       void *data, struct drm_file *file_priv)
 {
-	struct drm_mode_connector_set_property *out_resp = data;
-	struct drm_mode_object *obj;
-	struct drm_property *property;
-	struct drm_connector *connector;
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+					   struct drm_property *property,
+					   uint64_t value)
+{
 	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		if (connector->funcs->dpms)
+			(*connector->funcs->dpms)(connector, (int)value);
+		ret = 0;
+	} else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_connector_property_set_value(connector, property, value);
+	return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_crtc *crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->set_property)
+		ret = crtc->funcs->set_property(crtc, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
 	int i;
+	int copied = 0;
+	int props_count = 0;
+	uint32_t __user *props_ptr;
+	uint64_t __user *prop_values_ptr;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
 	mutex_lock(&dev->mode_config.mutex);
 
-	obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
 		goto out;
 	}
-	connector = obj_to_connector(obj);
 
-	for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
-		if (connector->property_ids[i] == out_resp->prop_id)
-			break;
+	props_count = obj->properties->count;
+
+	/* This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it. */
+	if ((arg->count_props >= props_count) && props_count) {
+		copied = 0;
+		props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+		prop_values_ptr = (uint64_t __user *)(unsigned long)
+				  (arg->prop_values_ptr);
+		for (i = 0; i < props_count; i++) {
+			if (put_user(obj->properties->ids[i],
+				     props_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			if (put_user(obj->properties->values[i],
+				     prop_values_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
 	}
+	arg->count_props = props_count;
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_mode_object *prop_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
 
-	if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+	mutex_lock(&dev->mode_config.mutex);
+
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj)
+		goto out;
+	if (!arg_obj->properties)
 		goto out;
-	}
 
-	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
-	if (!obj) {
+	for (i = 0; i < arg_obj->properties->count; i++)
+		if (arg_obj->properties->ids[i] == arg->prop_id)
+			break;
+
+	if (i == arg_obj->properties->count)
 		goto out;
-	}
-	property = obj_to_property(obj);
 
-	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+	prop_obj = drm_mode_object_find(dev, arg->prop_id,
+					DRM_MODE_OBJECT_PROPERTY);
+	if (!prop_obj)
 		goto out;
+	property = obj_to_property(prop_obj);
 
-	if (property->flags & DRM_MODE_PROP_RANGE) {
-		if (out_resp->value < property->values[0])
-			goto out;
+	if (!drm_property_change_is_valid(property, arg->value))
+		goto out;
 
-		if (out_resp->value > property->values[1])
-			goto out;
-	} else {
-		int found = 0;
-		for (i = 0; i < property->num_values; i++) {
-			if (property->values[i] == out_resp->value) {
-				found = 1;
-				break;
-			}
-		}
-		if (!found) {
-			goto out;
-		}
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
 	}
 
-	/* Do DPMS ourselves */
-	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int) out_resp->value);
-		ret = 0;
-	} else if (connector->funcs->set_property)
-		ret = connector->funcs->set_property(connector, property, out_resp->value);
-
-	/* store the property value if successful */
-	if (!ret)
-		drm_connector_property_set_value(connector, property, out_resp->value);
 out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;

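The comment in drm_mode_obj_get_properties_ioctl() above describes the usual two-call pattern: call once with count_props set to 0 to learn how many properties the object has, allocate, then call again to fetch the ids and values. A hedged userspace sketch of that pattern follows; the object id is assumed to come from an earlier resources/connector query, and it needs uapi headers recent enough to carry the new OBJ_GETPROPERTIES ioctl.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* obj_id/obj_type come from elsewhere, e.g. a connector id plus
 * DRM_MODE_OBJECT_CONNECTOR */
static int dump_object_properties(int fd, uint32_t obj_id, uint32_t obj_type)
{
	struct drm_mode_obj_get_properties arg;
	uint32_t *ids;
	uint64_t *values;
	uint32_t count, i;

	/* first call: count_props == 0, the kernel just reports the count */
	memset(&arg, 0, sizeof(arg));
	arg.obj_id = obj_id;
	arg.obj_type = obj_type;
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		return -1;

	count = arg.count_props;
	if (!count)
		return 0;

	ids = calloc(count, sizeof(*ids));
	values = calloc(count, sizeof(*values));
	if (!ids || !values) {
		free(ids);
		free(values);
		return -1;
	}

	/* second call: same object, now with buffers for 'count' entries */
	arg.props_ptr = (uint64_t)(uintptr_t)ids;
	arg.prop_values_ptr = (uint64_t)(uintptr_t)values;
	arg.count_props = count;
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg)) {
		free(ids);
		free(values);
		return -1;
	}

	for (i = 0; i < count && i < arg.count_props; i++)
		printf("property %u = %llu\n", ids[i],
		       (unsigned long long)values[i]);

	free(ids);
	free(values);
	return 0;
}
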
+ 3 - 1
drivers/gpu/drm/drm_drv.c

@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )

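The other new entry in the table, DRM_IOCTL_MODE_OBJ_SETPROPERTY, is the per-object counterpart of the old connector-only property ioctl; drm_mode_obj_set_property_ioctl() validates the value with drm_property_change_is_valid() and then routes it to the connector or CRTC set_property hook. A minimal wrapper might look like the sketch below, with fd, connector_id and prop_id assumed to be already known (for instance from the GETPROPERTIES sketch above).

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int set_connector_property(int fd, uint32_t connector_id,
				  uint32_t prop_id, uint64_t value)
{
	/* same layout the connector-only ioctl is now translated into */
	struct drm_mode_obj_set_property arg = {
		.value = value,
		.prop_id = prop_id,
		.obj_id = connector_id,
		.obj_type = DRM_MODE_OBJECT_CONNECTOR,
	};

	return ioctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &arg);
}
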
+ 3 - 3
drivers/gpu/drm/drm_gem.c

@@ -626,7 +626,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	drm_gem_object_reference(obj);
 
 	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(obj->dev, vma);
 	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
@@ -637,7 +637,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 	struct drm_device *dev = obj->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(vma);
+	drm_vm_close_locked(obj->dev, vma);
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -710,7 +710,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	 */
 	drm_gem_object_reference(obj);
 
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(dev, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);

+ 4 - 0
drivers/gpu/drm/drm_ioctl.c

@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	case DRM_CAP_DUMB_PREFER_SHADOW:
 		req->value = dev->mode_config.prefer_shadow;
 		break;
+	case DRM_CAP_PRIME:
+		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+		break;
 	default:
 		return -EINVAL;
 	}

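DRM_CAP_PRIME lets userspace ask whether a driver can import and/or export PRIME (dma-buf) handles before it tries to share buffers. A small probe through the existing GET_CAP ioctl is sketched below; /dev/dri/card0 and the presence of the DRM_CAP_PRIME/DRM_PRIME_CAP_* defines in the installed headers are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap)) {
		perror("DRM_IOCTL_GET_CAP");
		close(fd);
		return 1;
	}
	/* the kernel ORs in the import/export bits checked above */
	printf("PRIME import: %s, export: %s\n",
	       (cap.value & DRM_PRIME_CAP_IMPORT) ? "yes" : "no",
	       (cap.value & DRM_PRIME_CAP_EXPORT) ? "yes" : "no");
	close(fd);
	return 0;
}
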
+ 8 - 10
drivers/gpu/drm/drm_vm.c

@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+		struct vm_area_struct *vma)
 {
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
 	struct drm_vma_entry *vma_entry;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	struct drm_device *dev = priv->minor->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(dev, vma);
 	mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_vm_close_locked(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct drm_device *dev,
+		struct vm_area_struct *vma)
 {
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
 	struct drm_vma_entry *pt, *temp;
 
 	DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
 	struct drm_device *dev = priv->minor->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(vma);
+	drm_vm_close_locked(dev, vma);
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 	vma->vm_flags |= VM_DONTEXPAND;
 
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(dev, vma);
 	return 0;
 }
 
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 	vma->vm_flags |= VM_DONTEXPAND;
 
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(dev, vma);
 	return 0;
 }
 

+ 2 - 1
drivers/gpu/drm/gma500/Makefile

@@ -13,7 +13,6 @@ gma500_gfx-y += gem_glue.o \
 	  intel_i2c.o \
 	  intel_gmbus.o \
 	  mmu.o \
-	  opregion.o \
 	  power.o \
 	  psb_drv.o \
 	  psb_intel_display.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
 	  psb_device.o \
 	  mid_bios.o
 
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
 gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
 	  cdv_intel_crt.o \
 	  cdv_intel_display.o \

+ 59 - 1
drivers/gpu/drm/gma500/cdv_device.c

@@ -255,7 +255,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
 	struct psb_save_area *regs = &dev_priv->regs;
 	struct drm_connector *connector;
 
-	dev_info(dev->dev, "Saving GPU registers.\n");
+	dev_dbg(dev->dev, "Saving GPU registers.\n");
 
 	pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
 
@@ -485,10 +485,68 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
 	}	
 }
 
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+	{
+		.fp0 = FPA0,
+		.fp1 = FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = DPLL_A,
+		.dpll_md = DPLL_A_MD,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.base = DSPABASE,
+		.surf = DSPASURF,
+		.addr = DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.dpll_md = DPLL_B_MD,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.base = DSPBBASE,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	}
+};
+
 static int cdv_chip_setup(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+	dev_priv->regmap = cdv_regmap;
 	cdv_get_core_freq(dev);
 	psb_intel_opregion_init(dev);
 	psb_intel_init_bios(dev);

+ 144 - 177
drivers/gpu/drm/gma500/cdv_intel_display.c

@@ -218,8 +218,7 @@ static int
 cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
 			       struct cdv_intel_clock_t *clock, bool is_lvds)
 {
-	struct psb_intel_crtc *psb_crtc =
-				to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_crtc->pipe;
 	u32 m, n_vco, p;
 	int ret = 0;
@@ -503,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
-	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr;
 	int ret = 0;
 
@@ -532,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 	start = psbfb->gtt->offset;
 	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-	REG_WRITE(dspstride, crtc->fb->pitches[0]);
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
 
-	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr = REG_READ(map->cntr);
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
 	switch (crtc->fb->bits_per_pixel) {
@@ -556,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
 		ret = -EINVAL;
 		goto psb_intel_pipe_set_base_exit;
 	}
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 
 	dev_dbg(dev->dev,
 		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
 
-	REG_WRITE(dspbase, offset);
-	REG_READ(dspbase);
-	REG_WRITE(dspsurf, start);
-	REG_READ(dspsurf);
+	REG_WRITE(map->base, offset);
+	REG_READ(map->base);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
 
 psb_intel_pipe_cleaner:
 	/* If there was a previous display we can now unpin it */
@@ -721,8 +718,7 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
 static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_psb_private *dev_priv =
-				(struct drm_psb_private *)dev->dev_private;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int palreg = PALETTE_A;
 	int i;
@@ -758,7 +754,7 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
 		gma_power_end(dev);
 	} else {
 		for (i = 0; i < 256; i++) {
-			dev_priv->regs.psb.save_palette_a[i] =
+			dev_priv->regs.pipe[0].palette[i] =
 				  ((psb_intel_crtc->lut_r[i] +
 				  psb_intel_crtc->lut_adj[i]) << 16) |
 				  ((psb_intel_crtc->lut_g[i] +
@@ -779,13 +775,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
 static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int pipestat_reg = (pipe == 0) ? PIPEASTAT : PIPEBSTAT;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
 	/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -803,44 +796,44 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		psb_intel_crtc->active = true;
 
 		/* Enable the DPLL */
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			REG_WRITE(dpll_reg, temp);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
 		}
 
 		/* Jim Bish - switch plan and pipe per scott */
 		/* Enable the plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp | DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			REG_WRITE(map->base, REG_READ(map->base));
 		}
 
 		udelay(150);
 
 		/* Enable the pipe */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) == 0)
-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
 
-		temp = REG_READ(pipestat_reg);
+		temp = REG_READ(map->status);
 		temp &= ~(0xFFFF);
 		temp |= PIPE_FIFO_UNDERRUN;
-		REG_WRITE(pipestat_reg, temp);
-		REG_READ(pipestat_reg);
+		REG_WRITE(map->status, temp);
+		REG_READ(map->status);
 
 		cdv_intel_update_watermark(dev, crtc);
 		cdv_intel_crtc_load_lut(crtc);
@@ -870,10 +863,10 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		cdv_intel_wait_for_vblank(dev);
 
 		/* Next, disable display pipes */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			REG_READ(pipeconf_reg);
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
 		}
 
 		/* Wait for vblank for the disable to take effect. */
@@ -882,19 +875,19 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		udelay(150);
 
 		/* Disable display plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp & ~DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-			REG_READ(dspbase_reg);
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
 		}
 
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 		}
 
 		/* Wait for the clocks to turn off. */
@@ -953,19 +946,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk;
 	struct cdv_intel_clock_t clock;
 	u32 dpll = 0, dspcntr, pipeconf;
@@ -1036,7 +1017,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* dpll |= (2 << 11); */
 
 	/* setup pipeconf */
-	pipeconf = REG_READ(pipeconf_reg);
+	pipeconf = REG_READ(map->conf);
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -1049,8 +1030,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	dspcntr |= DISPLAY_PLANE_ENABLE;
 	pipeconf |= PIPEACONF_ENABLE;
 
-	REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+	REG_READ(map->dpll);
 
 	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
 
@@ -1094,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
-	REG_WRITE(dpll_reg,
-		(REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->dpll,
+		(REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+	REG_READ(map->dpll);
 	/* Wait for the clocks to stabilize. */
 	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
 
-	if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+	if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
 		dev_err(dev->dev, "Failed to get DPLL lock\n");
 		return -EBUSY;
 	}
 
 	{
 		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-		REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+		REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
 	}
 
-	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
 		  ((adjusted_mode->crtc_htotal - 1) << 16));
-	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
 		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
 		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
-	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
 		  ((adjusted_mode->crtc_vtotal - 1) << 16));
-	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
 		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
 		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
 	/* pipesrc and dspsize control the size that is scaled from,
 	 * which should always be the user's requested size.
 	 */
-	REG_WRITE(dspsize_reg,
+	REG_WRITE(map->size,
 		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-	REG_WRITE(dsppos_reg, 0);
-	REG_WRITE(pipesrc_reg,
+	REG_WRITE(map->pos, 0);
+	REG_WRITE(map->src,
 		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-	REG_WRITE(pipeconf_reg, pipeconf);
-	REG_READ(pipeconf_reg);
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
 
 	cdv_intel_wait_for_vblank(dev);
 
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 
 	/* Flush the plane changes */
 	{
@@ -1156,11 +1137,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 static void cdv_intel_crtc_save(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_psb_private *dev_priv =
-			(struct drm_psb_private *)dev->dev_private; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
-	int pipeA = (psb_intel_crtc->pipe == 0);
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
 	uint32_t paletteReg;
 	int i;
 
@@ -1169,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
 		return;
 	}
 
-	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
-	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
-	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
-	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
-	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
-	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
-	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
-	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
-	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
-	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
-	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
-	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
-	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+	crtc_state->savePIPECONF = REG_READ(map->conf);
+	crtc_state->savePIPESRC = REG_READ(map->src);
+	crtc_state->saveFP0 = REG_READ(map->fp0);
+	crtc_state->saveFP1 = REG_READ(map->fp1);
+	crtc_state->saveDPLL = REG_READ(map->dpll);
+	crtc_state->saveHTOTAL = REG_READ(map->htotal);
+	crtc_state->saveHBLANK = REG_READ(map->hblank);
+	crtc_state->saveHSYNC = REG_READ(map->hsync);
+	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+	crtc_state->saveVBLANK = REG_READ(map->vblank);
+	crtc_state->saveVSYNC = REG_READ(map->vsync);
+	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
 
 	/*NOTE: DSPSIZE DSPPOS only for psb*/
-	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
-	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+	crtc_state->saveDSPSIZE = REG_READ(map->size);
+	crtc_state->saveDSPPOS = REG_READ(map->pos);
 
-	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+	crtc_state->saveDSPBASE = REG_READ(map->base);
 
 	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
 			crtc_state->saveDSPCNTR,
@@ -1208,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
 			crtc_state->saveDSPBASE
 		);
 
-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	paletteReg = map->palette;
 	for (i = 0; i < 256; ++i)
 		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
 }
@@ -1219,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
 static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_psb_private * dev_priv =
-				(struct drm_psb_private *)dev->dev_private; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
 	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
-	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
-	int pipeA = (psb_intel_crtc->pipe == 0);
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
 	uint32_t paletteReg;
 	int i;
 
@@ -1235,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
 
 	DRM_DEBUG(
 		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
-		REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
-		REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
-		REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
-		REG_READ(pipeA ? FPA0 : FPB0),
-		REG_READ(pipeA ? FPA1 : FPB1),
-		REG_READ(pipeA ? DPLL_A : DPLL_B),
-		REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
-		REG_READ(pipeA ? HBLANK_A : HBLANK_B),
-		REG_READ(pipeA ? HSYNC_A : HSYNC_B),
-		REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
-		REG_READ(pipeA ? VBLANK_A : VBLANK_B),
-		REG_READ(pipeA ? VSYNC_A : VSYNC_B),
-		REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
-		REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
-		REG_READ(pipeA ? DSPAPOS : DSPBPOS),
-		REG_READ(pipeA ? DSPABASE : DSPBBASE)
-		);
+		REG_READ(map->cntr),
+		REG_READ(map->conf),
+		REG_READ(map->src),
+		REG_READ(map->fp0),
+		REG_READ(map->fp1),
+		REG_READ(map->dpll),
+		REG_READ(map->htotal),
+		REG_READ(map->hblank),
+		REG_READ(map->hsync),
+		REG_READ(map->vtotal),
+		REG_READ(map->vblank),
+		REG_READ(map->vsync),
+		REG_READ(map->stride),
+		REG_READ(map->size),
+		REG_READ(map->pos),
+		REG_READ(map->base)
+	);
 
 	DRM_DEBUG(
 		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1271,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
 		crtc_state->saveDSPSIZE,
 		crtc_state->saveDSPPOS,
 		crtc_state->saveDSPBASE
-		);
+	);
 
 
 	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
-		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
-			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
-		REG_READ(pipeA ? DPLL_A : DPLL_B);
+		REG_WRITE(map->dpll,
+				crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
 		DRM_DEBUG("write dpll: %x\n",
-				REG_READ(pipeA ? DPLL_A : DPLL_B));
+				REG_READ(map->dpll));
 		udelay(150);
 	}
 
-	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
-	REG_READ(pipeA ? FPA0 : FPB0);
+	REG_WRITE(map->fp0, crtc_state->saveFP0);
+	REG_READ(map->fp0);
 
-	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
-	REG_READ(pipeA ? FPA1 : FPB1);
+	REG_WRITE(map->fp1, crtc_state->saveFP1);
+	REG_READ(map->fp1);
 
-	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
-	REG_READ(pipeA ? DPLL_A : DPLL_B);
+	REG_WRITE(map->dpll, crtc_state->saveDPLL);
+	REG_READ(map->dpll);
 	udelay(150);
 
-	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
-	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
-	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
-	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
-	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
-	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
-	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
 
-	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
-	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
 
-	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
-	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+	REG_WRITE(map->src, crtc_state->savePIPESRC);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+	REG_WRITE(map->conf, crtc_state->savePIPECONF);
 
 	cdv_intel_wait_for_vblank(dev);
 
-	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
 
 	cdv_intel_wait_for_vblank(dev);
 
-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	paletteReg = map->palette;
 	for (i = 0; i < 256; ++i)
 		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
 }
@@ -1490,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
 static int cdv_intel_crtc_clock_get(struct drm_device *dev,
 				struct drm_crtc *crtc)
 {
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 dpll;
 	u32 fp;
 	struct cdv_intel_clock_t clock;
 	bool is_lvds;
-	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
 
 	if (gma_power_begin(dev, false)) {
-		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+		dpll = REG_READ(map->dpll);
 		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+			fp = REG_READ(map->fp0);
 		else
-			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+			fp = REG_READ(map->fp1);
 		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
 		gma_power_end(dev);
 	} else {
-		dpll = (pipe == 0) ?
-			dev_priv->regs.psb.saveDPLL_A :
-			dev_priv->regs.psb.saveDPLL_B;
-
+		dpll = p->dpll;
 		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-			fp = (pipe == 0) ?
-				dev_priv->regs.psb.saveFPA0 :
-				dev_priv->regs.psb.saveFPB0;
+			fp = p->fp0;
 		else
-			fp = (pipe == 0) ?
-				dev_priv->regs.psb.saveFPA1 :
-				dev_priv->regs.psb.saveFPB1;
+			fp = p->fp1;
 
 		is_lvds = (pipe == 1) &&
 				(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1576,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
 {
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	struct drm_display_mode *mode;
 	int htot;
 	int hsync;
 	int vtot;
 	int vsync;
-	struct drm_psb_private *dev_priv = dev->dev_private;
 
 	if (gma_power_begin(dev, false)) {
-		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
-		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
-		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
-		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+		htot = REG_READ(map->htotal);
+		hsync = REG_READ(map->hsync);
+		vtot = REG_READ(map->vtotal);
+		vsync = REG_READ(map->vsync);
 		gma_power_end(dev);
 	} else {
-		htot = (pipe == 0) ?
-			dev_priv->regs.psb.saveHTOTAL_A :
-			dev_priv->regs.psb.saveHTOTAL_B;
-		hsync = (pipe == 0) ?
-			dev_priv->regs.psb.saveHSYNC_A :
-			dev_priv->regs.psb.saveHSYNC_B;
-		vtot = (pipe == 0) ?
-			dev_priv->regs.psb.saveVTOTAL_A :
-			dev_priv->regs.psb.saveVTOTAL_B;
-		vsync = (pipe == 0) ?
-			dev_priv->regs.psb.saveVSYNC_A :
-			dev_priv->regs.psb.saveVSYNC_B;
+		htot = p->htotal;
+		hsync = p->hsync;
+		vtot = p->vtotal;
+		vsync = p->vsync;
 	}
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
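The pattern this series applies across the gma500 CRTC code is visible above: every per-pipe ternary of the form (pipe == 0) ? REG_A : REG_B is replaced by one lookup into a const table of register offsets, dev_priv->regmap[pipe], done once at the top of each function. A minimal stand-alone sketch of that table-lookup idea, with an invented struct and invented offset values rather than the driver's real psb_offset definition:

	#include <stdint.h>
	#include <stdio.h>

	/* Invented per-pipe register offsets; the real driver uses struct psb_offset. */
	struct pipe_regs {
		uint32_t cntr;
		uint32_t stride;
		uint32_t surf;
	};

	static const struct pipe_regs regmap[2] = {
		/* pipe A */ { .cntr = 0x70180, .stride = 0x70188, .surf = 0x7019c },
		/* pipe B */ { .cntr = 0x71180, .stride = 0x71188, .surf = 0x7119c },
	};

	static void pipe_set_base(int pipe, uint32_t stride, uint32_t base)
	{
		/* One table lookup replaces a ternary on every register access. */
		const struct pipe_regs *map = &regmap[pipe];

		printf("stride reg %#x <- %u\n", (unsigned)map->stride, (unsigned)stride);
		printf("surf   reg %#x <- %#x\n", (unsigned)map->surf, (unsigned)base);
	}

	int main(void)
	{
		pipe_set_base(0, 4096, 0x100000);
		pipe_set_base(1, 8192, 0x200000);
		return 0;
	}

The gain is the same as in the hunks above: supporting another pipe means one new table entry instead of another arm on every ternary, and the function bodies no longer care which pipe they drive.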

+ 11 - 6
drivers/gpu/drm/gma500/framebuffer.c

@@ -476,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-	dev_info(dev->dev, "allocated %dx%d fb\n",
+	dev_dbg(dev->dev, "allocated %dx%d fb\n",
 					psbfb->base.width, psbfb->base.height);
 
 	mutex_unlock(&dev->struct_mutex);
@@ -800,15 +800,20 @@ void psb_modeset_init(struct drm_device *dev)
 
 	if (dev_priv->ops->errata)
 	        dev_priv->ops->errata(dev);
+
+        dev_priv->modeset = true;
 }
 
 void psb_modeset_cleanup(struct drm_device *dev)
 {
-	mutex_lock(&dev->struct_mutex);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (dev_priv->modeset) {
+		mutex_lock(&dev->struct_mutex);
 
-	drm_kms_helper_poll_fini(dev);
-	psb_fbdev_fini(dev);
-	drm_mode_config_cleanup(dev);
+		drm_kms_helper_poll_fini(dev);
+		psb_fbdev_fini(dev);
+		drm_mode_config_cleanup(dev);
 
-	mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
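The framebuffer.c change makes psb_modeset_cleanup conditional on a modeset flag that psb_modeset_init only sets once it has finished, so teardown becomes a safe no-op on paths where mode setting never came up. A small sketch of that guard pattern, with illustrative names rather than the driver's:

	#include <stdbool.h>
	#include <stdio.h>

	struct dev_state {
		bool modeset;	/* set only after init fully succeeds */
	};

	static void modeset_init(struct dev_state *s)
	{
		/* ... create mode config, fbdev, pollers ... */
		s->modeset = true;	/* mirrors dev_priv->modeset in the diff */
	}

	static void modeset_cleanup(struct dev_state *s)
	{
		if (!s->modeset)	/* nothing to tear down if init never ran */
			return;
		/* ... poll_fini, fbdev_fini, mode_config_cleanup ... */
		printf("cleanup done\n");
	}

	int main(void)
	{
		struct dev_state s = { .modeset = false };

		modeset_cleanup(&s);	/* safe no-op before init */
		modeset_init(&s);
		modeset_cleanup(&s);	/* real teardown after init */
		return 0;
	}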

+ 3 - 9
drivers/gpu/drm/gma500/gtt.c

@@ -416,7 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	unsigned long stolen_size, vram_stolen_size;
 	unsigned i, num_pages;
 	unsigned pfn_base;
-	uint32_t dvmt_mode = 0;
 	struct psb_gtt *pg;
 
 	int ret = 0;
@@ -489,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 
 	stolen_size = vram_stolen_size;
 
-	printk(KERN_INFO "Stolen memory information\n");
-	printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
-	printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
-		vram_stolen_size/1024);
-	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
-	printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
-		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+			dev_priv->stolen_base, vram_stolen_size / 1024);
 
 	if (resume && (gtt_pages != pg->gtt_pages) &&
 	    (stolen_size != pg->stolen_size)) {
@@ -532,7 +526,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 
 	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
 	num_pages = vram_stolen_size >> PAGE_SHIFT;
-	printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
 		num_pages, pfn_base << PAGE_SHIFT, 0);
 	for (i = 0; i < num_pages; ++i) {
 		pte = psb_gtt_mask_pte(pfn_base + i, 0);

+ 3 - 21
drivers/gpu/drm/gma500/intel_bios.c

@@ -490,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
 void psb_intel_destroy_bios(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *sdvo_lvds_vbt_mode =
-				dev_priv->sdvo_lvds_vbt_mode;
-	struct drm_display_mode *lfp_lvds_vbt_mode =
-				dev_priv->lfp_lvds_vbt_mode;
-	struct bdb_lvds_backlight *lvds_bl =
-				dev_priv->lvds_bl;
-
-	/*free sdvo panel mode*/
-	if (sdvo_lvds_vbt_mode) {
-		dev_priv->sdvo_lvds_vbt_mode = NULL;
-		kfree(sdvo_lvds_vbt_mode);
-	}
-
-	if (lfp_lvds_vbt_mode) {
-		dev_priv->lfp_lvds_vbt_mode = NULL;
-		kfree(lfp_lvds_vbt_mode);
-	}
 
-	if (lvds_bl) {
-		dev_priv->lvds_bl = NULL;
-		kfree(lvds_bl);
-	}
+	kfree(dev_priv->sdvo_lvds_vbt_mode);
+	kfree(dev_priv->lfp_lvds_vbt_mode);
+	kfree(dev_priv->lvds_bl);
 }
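The intel_bios.c hunk relies on kfree() treating a NULL pointer as a no-op, which is why the three NULL checks and the clear-then-free sequence can collapse into three unconditional kfree() calls. The same shape in ordinary C, where free(NULL) carries the identical guarantee:

	#include <stdlib.h>

	struct bios_data {
		char *sdvo_lvds_vbt_mode;
		char *lfp_lvds_vbt_mode;
		char *lvds_bl;
	};

	static void destroy_bios(struct bios_data *d)
	{
		/* free(NULL) is defined to do nothing, so no guards are needed */
		free(d->sdvo_lvds_vbt_mode);
		free(d->lfp_lvds_vbt_mode);
		free(d->lvds_bl);
	}

	int main(void)
	{
		struct bios_data d = { .sdvo_lvds_vbt_mode = malloc(16) };

		destroy_bios(&d);	/* two of the three pointers are NULL: still fine */
		return 0;
	}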

+ 153 - 296
drivers/gpu/drm/gma500/mdfld_device.c

@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
  *
  * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
  */
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct medfield_state *regs = &dev_priv->regs.mdfld;
+	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+	const struct psb_offset *map = &dev_priv->regmap[pipenum];
 	int i;
+	u32 *mipi_val;
 
 	/* register */
-	u32 dpll_reg = MRST_DPLL_A;
-	u32 fp_reg = MRST_FPA0;
-	u32 pipeconf_reg = PIPEACONF;
-	u32 htot_reg = HTOTAL_A;
-	u32 hblank_reg = HBLANK_A;
-	u32 hsync_reg = HSYNC_A;
-	u32 vtot_reg = VTOTAL_A;
-	u32 vblank_reg = VBLANK_A;
-	u32 vsync_reg = VSYNC_A;
-	u32 pipesrc_reg = PIPEASRC;
-	u32 dspstride_reg = DSPASTRIDE;
-	u32 dsplinoff_reg = DSPALINOFF;
-	u32 dsptileoff_reg = DSPATILEOFF;
-	u32 dspsize_reg = DSPASIZE;
-	u32 dsppos_reg = DSPAPOS;
-	u32 dspsurf_reg = DSPASURF;
 	u32 mipi_reg = MIPI;
-	u32 dspcntr_reg = DSPACNTR;
-	u32 dspstatus_reg = PIPEASTAT;
-	u32 palette_reg = PALETTE_A;
-
-	/* pointer to values */
-	u32 *dpll_val = &regs->saveDPLL_A;
-	u32 *fp_val = &regs->saveFPA0;
-	u32 *pipeconf_val = &regs->savePIPEACONF;
-	u32 *htot_val = &regs->saveHTOTAL_A;
-	u32 *hblank_val = &regs->saveHBLANK_A;
-	u32 *hsync_val = &regs->saveHSYNC_A;
-	u32 *vtot_val = &regs->saveVTOTAL_A;
-	u32 *vblank_val = &regs->saveVBLANK_A;
-	u32 *vsync_val = &regs->saveVSYNC_A;
-	u32 *pipesrc_val = &regs->savePIPEASRC;
-	u32 *dspstride_val = &regs->saveDSPASTRIDE;
-	u32 *dsplinoff_val = &regs->saveDSPALINOFF;
-	u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
-	u32 *dspsize_val = &regs->saveDSPASIZE;
-	u32 *dsppos_val = &regs->saveDSPAPOS;
-	u32 *dspsurf_val = &regs->saveDSPASURF;
-	u32 *mipi_val = &regs->saveMIPI;
-	u32 *dspcntr_val = &regs->saveDSPACNTR;
-	u32 *dspstatus_val = &regs->saveDSPASTATUS;
-	u32 *palette_val = regs->save_palette_a;
-
-	switch (pipe) {
+
+	switch (pipenum) {
 	case 0:
+		mipi_val = &regs->saveMIPI;
 		break;
 	case 1:
-		/* regester */
-		dpll_reg = MDFLD_DPLL_B;
-		fp_reg = MDFLD_DPLL_DIV0;
-		pipeconf_reg = PIPEBCONF;
-		htot_reg = HTOTAL_B;
-		hblank_reg = HBLANK_B;
-		hsync_reg = HSYNC_B;
-		vtot_reg = VTOTAL_B;
-		vblank_reg = VBLANK_B;
-		vsync_reg = VSYNC_B;
-		pipesrc_reg = PIPEBSRC;
-		dspstride_reg = DSPBSTRIDE;
-		dsplinoff_reg = DSPBLINOFF;
-		dsptileoff_reg = DSPBTILEOFF;
-		dspsize_reg = DSPBSIZE;
-		dsppos_reg = DSPBPOS;
-		dspsurf_reg = DSPBSURF;
-		dspcntr_reg = DSPBCNTR;
-		dspstatus_reg = PIPEBSTAT;
-		palette_reg = PALETTE_B;
-
-		/* values */
-		dpll_val = &regs->saveDPLL_B;
-		fp_val = &regs->saveFPB0;
-		pipeconf_val = &regs->savePIPEBCONF;
-		htot_val = &regs->saveHTOTAL_B;
-		hblank_val = &regs->saveHBLANK_B;
-		hsync_val = &regs->saveHSYNC_B;
-		vtot_val = &regs->saveVTOTAL_B;
-		vblank_val = &regs->saveVBLANK_B;
-		vsync_val = &regs->saveVSYNC_B;
-		pipesrc_val = &regs->savePIPEBSRC;
-		dspstride_val = &regs->saveDSPBSTRIDE;
-		dsplinoff_val = &regs->saveDSPBLINOFF;
-		dsptileoff_val = &regs->saveDSPBTILEOFF;
-		dspsize_val = &regs->saveDSPBSIZE;
-		dsppos_val = &regs->saveDSPBPOS;
-		dspsurf_val = &regs->saveDSPBSURF;
-		dspcntr_val = &regs->saveDSPBCNTR;
-		dspstatus_val = &regs->saveDSPBSTATUS;
-		palette_val = regs->save_palette_b;
+		mipi_val = &regs->saveMIPI;
 		break;
 	case 2:
 		/* register */
-		pipeconf_reg = PIPECCONF;
-		htot_reg = HTOTAL_C;
-		hblank_reg = HBLANK_C;
-		hsync_reg = HSYNC_C;
-		vtot_reg = VTOTAL_C;
-		vblank_reg = VBLANK_C;
-		vsync_reg = VSYNC_C;
-		pipesrc_reg = PIPECSRC;
-		dspstride_reg = DSPCSTRIDE;
-		dsplinoff_reg = DSPCLINOFF;
-		dsptileoff_reg = DSPCTILEOFF;
-		dspsize_reg = DSPCSIZE;
-		dsppos_reg = DSPCPOS;
-		dspsurf_reg = DSPCSURF;
 		mipi_reg = MIPI_C;
-		dspcntr_reg = DSPCCNTR;
-		dspstatus_reg = PIPECSTAT;
-		palette_reg = PALETTE_C;
-
 		/* pointer to values */
-		pipeconf_val = &regs->savePIPECCONF;
-		htot_val = &regs->saveHTOTAL_C;
-		hblank_val = &regs->saveHBLANK_C;
-		hsync_val = &regs->saveHSYNC_C;
-		vtot_val = &regs->saveVTOTAL_C;
-		vblank_val = &regs->saveVBLANK_C;
-		vsync_val = &regs->saveVSYNC_C;
-		pipesrc_val = &regs->savePIPECSRC;
-		dspstride_val = &regs->saveDSPCSTRIDE;
-		dsplinoff_val = &regs->saveDSPCLINOFF;
-		dsptileoff_val = &regs->saveDSPCTILEOFF;
-		dspsize_val = &regs->saveDSPCSIZE;
-		dsppos_val = &regs->saveDSPCPOS;
-		dspsurf_val = &regs->saveDSPCSURF;
 		mipi_val = &regs->saveMIPI_C;
-		dspcntr_val = &regs->saveDSPCCNTR;
-		dspstatus_val = &regs->saveDSPCSTATUS;
-		palette_val = regs->save_palette_c;
 		break;
 	default:
 		DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
 	}
 
 	/* Pipe & plane A info */
-	*dpll_val = PSB_RVDC32(dpll_reg);
-	*fp_val = PSB_RVDC32(fp_reg);
-	*pipeconf_val = PSB_RVDC32(pipeconf_reg);
-	*htot_val = PSB_RVDC32(htot_reg);
-	*hblank_val = PSB_RVDC32(hblank_reg);
-	*hsync_val = PSB_RVDC32(hsync_reg);
-	*vtot_val = PSB_RVDC32(vtot_reg);
-	*vblank_val = PSB_RVDC32(vblank_reg);
-	*vsync_val = PSB_RVDC32(vsync_reg);
-	*pipesrc_val = PSB_RVDC32(pipesrc_reg);
-	*dspstride_val = PSB_RVDC32(dspstride_reg);
-	*dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
-	*dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
-	*dspsize_val = PSB_RVDC32(dspsize_reg);
-	*dsppos_val = PSB_RVDC32(dsppos_reg);
-	*dspsurf_val = PSB_RVDC32(dspsurf_reg);
-	*dspcntr_val = PSB_RVDC32(dspcntr_reg);
-	*dspstatus_val = PSB_RVDC32(dspstatus_reg);
+	pipe->dpll = PSB_RVDC32(map->dpll);
+	pipe->fp0 = PSB_RVDC32(map->fp0);
+	pipe->conf = PSB_RVDC32(map->conf);
+	pipe->htotal = PSB_RVDC32(map->htotal);
+	pipe->hblank = PSB_RVDC32(map->hblank);
+	pipe->hsync = PSB_RVDC32(map->hsync);
+	pipe->vtotal = PSB_RVDC32(map->vtotal);
+	pipe->vblank = PSB_RVDC32(map->vblank);
+	pipe->vsync = PSB_RVDC32(map->vsync);
+	pipe->src = PSB_RVDC32(map->src);
+	pipe->stride = PSB_RVDC32(map->stride);
+	pipe->linoff = PSB_RVDC32(map->linoff);
+	pipe->tileoff = PSB_RVDC32(map->tileoff);
+	pipe->size = PSB_RVDC32(map->size);
+	pipe->pos = PSB_RVDC32(map->pos);
+	pipe->surf = PSB_RVDC32(map->surf);
+	pipe->cntr = PSB_RVDC32(map->cntr);
+	pipe->status = PSB_RVDC32(map->status);
 
 	/*save palette (gamma) */
 	for (i = 0; i < 256; i++)
-		palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+		pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
 
-	if (pipe == 1) {
+	if (pipenum == 1) {
 		regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
 		regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
 
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
  *
  * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
  */
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
 {
 	/* To get  panel out of ULPS mode. */
 	u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct mdfld_dsi_config *dsi_config = NULL;
 	struct medfield_state *regs = &dev_priv->regs.mdfld;
-	u32 i = 0;
-	u32 dpll = 0;
+	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+	const struct psb_offset *map = &dev_priv->regmap[pipenum];
+	u32 i;
+	u32 dpll;
 	u32 timeout = 0;
 
-	/* regester */
-	u32 dpll_reg = MRST_DPLL_A;
-	u32 fp_reg = MRST_FPA0;
-	u32 pipeconf_reg = PIPEACONF;
-	u32 htot_reg = HTOTAL_A;
-	u32 hblank_reg = HBLANK_A;
-	u32 hsync_reg = HSYNC_A;
-	u32 vtot_reg = VTOTAL_A;
-	u32 vblank_reg = VBLANK_A;
-	u32 vsync_reg = VSYNC_A;
-	u32 pipesrc_reg = PIPEASRC;
-	u32 dspstride_reg = DSPASTRIDE;
-	u32 dsplinoff_reg = DSPALINOFF;
-	u32 dsptileoff_reg = DSPATILEOFF;
-	u32 dspsize_reg = DSPASIZE;
-	u32 dsppos_reg = DSPAPOS;
-	u32 dspsurf_reg = DSPASURF;
-	u32 dspstatus_reg = PIPEASTAT;
+	/* register */
 	u32 mipi_reg = MIPI;
-	u32 dspcntr_reg = DSPACNTR;
-	u32 palette_reg = PALETTE_A;
 
 	/* values */
-	u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
-	u32 fp_val = regs->saveFPA0;
-	u32 pipeconf_val = regs->savePIPEACONF;
-	u32 htot_val = regs->saveHTOTAL_A;
-	u32 hblank_val = regs->saveHBLANK_A;
-	u32 hsync_val = regs->saveHSYNC_A;
-	u32 vtot_val = regs->saveVTOTAL_A;
-	u32 vblank_val = regs->saveVBLANK_A;
-	u32 vsync_val = regs->saveVSYNC_A;
-	u32 pipesrc_val = regs->savePIPEASRC;
-	u32 dspstride_val = regs->saveDSPASTRIDE;
-	u32 dsplinoff_val = regs->saveDSPALINOFF;
-	u32 dsptileoff_val = regs->saveDSPATILEOFF;
-	u32 dspsize_val = regs->saveDSPASIZE;
-	u32 dsppos_val = regs->saveDSPAPOS;
-	u32 dspsurf_val = regs->saveDSPASURF;
-	u32 dspstatus_val = regs->saveDSPASTATUS;
+	u32 dpll_val = pipe->dpll;
 	u32 mipi_val = regs->saveMIPI;
-	u32 dspcntr_val = regs->saveDSPACNTR;
-	u32 *palette_val = regs->save_palette_a;
 
-	switch (pipe) {
+	switch (pipenum) {
 	case 0:
+		dpll_val &= ~DPLL_VCO_ENABLE;
 		dsi_config = dev_priv->dsi_configs[0];
 		break;
 	case 1:
-		/* regester */
-		dpll_reg = MDFLD_DPLL_B;
-		fp_reg = MDFLD_DPLL_DIV0;
-		pipeconf_reg = PIPEBCONF;
-		htot_reg = HTOTAL_B;
-		hblank_reg = HBLANK_B;
-		hsync_reg = HSYNC_B;
-		vtot_reg = VTOTAL_B;
-		vblank_reg = VBLANK_B;
-		vsync_reg = VSYNC_B;
-		pipesrc_reg = PIPEBSRC;
-		dspstride_reg = DSPBSTRIDE;
-		dsplinoff_reg = DSPBLINOFF;
-		dsptileoff_reg = DSPBTILEOFF;
-		dspsize_reg = DSPBSIZE;
-		dsppos_reg = DSPBPOS;
-		dspsurf_reg = DSPBSURF;
-		dspcntr_reg = DSPBCNTR;
-		dspstatus_reg = PIPEBSTAT;
-		palette_reg = PALETTE_B;
-
-		/* values */
-		dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
-		fp_val = regs->saveFPB0;
-		pipeconf_val = regs->savePIPEBCONF;
-		htot_val = regs->saveHTOTAL_B;
-		hblank_val = regs->saveHBLANK_B;
-		hsync_val = regs->saveHSYNC_B;
-		vtot_val = regs->saveVTOTAL_B;
-		vblank_val = regs->saveVBLANK_B;
-		vsync_val = regs->saveVSYNC_B;
-		pipesrc_val = regs->savePIPEBSRC;
-		dspstride_val = regs->saveDSPBSTRIDE;
-		dsplinoff_val = regs->saveDSPBLINOFF;
-		dsptileoff_val = regs->saveDSPBTILEOFF;
-		dspsize_val = regs->saveDSPBSIZE;
-		dsppos_val = regs->saveDSPBPOS;
-		dspsurf_val = regs->saveDSPBSURF;
-		dspcntr_val = regs->saveDSPBCNTR;
-		dspstatus_val = regs->saveDSPBSTATUS;
-		palette_val = regs->save_palette_b;
+		dpll_val &= ~DPLL_VCO_ENABLE;
 		break;
 	case 2:
-		/* regester */
-		pipeconf_reg = PIPECCONF;
-		htot_reg = HTOTAL_C;
-		hblank_reg = HBLANK_C;
-		hsync_reg = HSYNC_C;
-		vtot_reg = VTOTAL_C;
-		vblank_reg = VBLANK_C;
-		vsync_reg = VSYNC_C;
-		pipesrc_reg = PIPECSRC;
-		dspstride_reg = DSPCSTRIDE;
-		dsplinoff_reg = DSPCLINOFF;
-		dsptileoff_reg = DSPCTILEOFF;
-		dspsize_reg = DSPCSIZE;
-		dsppos_reg = DSPCPOS;
-		dspsurf_reg = DSPCSURF;
 		mipi_reg = MIPI_C;
-		dspcntr_reg = DSPCCNTR;
-		dspstatus_reg = PIPECSTAT;
-		palette_reg = PALETTE_C;
-
-		/* values */
-		pipeconf_val = regs->savePIPECCONF;
-		htot_val = regs->saveHTOTAL_C;
-		hblank_val = regs->saveHBLANK_C;
-		hsync_val = regs->saveHSYNC_C;
-		vtot_val = regs->saveVTOTAL_C;
-		vblank_val = regs->saveVBLANK_C;
-		vsync_val = regs->saveVSYNC_C;
-		pipesrc_val = regs->savePIPECSRC;
-		dspstride_val = regs->saveDSPCSTRIDE;
-		dsplinoff_val = regs->saveDSPCLINOFF;
-		dsptileoff_val = regs->saveDSPCTILEOFF;
-		dspsize_val = regs->saveDSPCSIZE;
-		dsppos_val = regs->saveDSPCPOS;
-		dspsurf_val = regs->saveDSPCSURF;
 		mipi_val = regs->saveMIPI_C;
-		dspcntr_val = regs->saveDSPCCNTR;
-		dspstatus_val = regs->saveDSPCSTATUS;
-		palette_val = regs->save_palette_c;
-
 		dsi_config = dev_priv->dsi_configs[1];
 		break;
 	default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 	/*make sure VGA plane is off. it initializes to on after reset!*/
 	PSB_WVDC32(0x80000000, VGACNTRL);
 
-	if (pipe == 1) {
-		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
-		PSB_RVDC32(dpll_reg);
+	if (pipenum == 1) {
+		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+		PSB_RVDC32(map->dpll);
 
-		PSB_WVDC32(fp_val, fp_reg);
+		PSB_WVDC32(pipe->fp0, map->fp0);
 	} else {
 
-		dpll = PSB_RVDC32(dpll_reg);
+		dpll = PSB_RVDC32(map->dpll);
 
 		if (!(dpll & DPLL_VCO_ENABLE)) {
 
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 			   before enable the VCO */
 			if (dpll & MDFLD_PWR_GATE_EN) {
 				dpll &= ~MDFLD_PWR_GATE_EN;
-				PSB_WVDC32(dpll, dpll_reg);
+				PSB_WVDC32(dpll, map->dpll);
 				/* FIXME_MDFLD PO - change 500 to 1 after PO */
 				udelay(500);
 			}
 
-			PSB_WVDC32(fp_val, fp_reg);
-			PSB_WVDC32(dpll_val, dpll_reg);
+			PSB_WVDC32(pipe->fp0, map->fp0);
+			PSB_WVDC32(dpll_val, map->dpll);
 			/* FIXME_MDFLD PO - change 500 to 1 after PO */
 			udelay(500);
 
 			dpll_val |= DPLL_VCO_ENABLE;
-			PSB_WVDC32(dpll_val, dpll_reg);
-			PSB_RVDC32(dpll_reg);
+			PSB_WVDC32(dpll_val, map->dpll);
+			PSB_RVDC32(map->dpll);
 
 			/* wait for DSI PLL to lock */
 			while (timeout < 20000 &&
-			  !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+			  !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
 				udelay(150);
 				timeout++;
 			}
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 		}
 	}
 	/* Restore mode */
-	PSB_WVDC32(htot_val, htot_reg);
-	PSB_WVDC32(hblank_val, hblank_reg);
-	PSB_WVDC32(hsync_val, hsync_reg);
-	PSB_WVDC32(vtot_val, vtot_reg);
-	PSB_WVDC32(vblank_val, vblank_reg);
-	PSB_WVDC32(vsync_val, vsync_reg);
-	PSB_WVDC32(pipesrc_val, pipesrc_reg);
-	PSB_WVDC32(dspstatus_val, dspstatus_reg);
+	PSB_WVDC32(pipe->htotal, map->htotal);
+	PSB_WVDC32(pipe->hblank, map->hblank);
+	PSB_WVDC32(pipe->hsync, map->hsync);
+	PSB_WVDC32(pipe->vtotal, map->vtotal);
+	PSB_WVDC32(pipe->vblank, map->vblank);
+	PSB_WVDC32(pipe->vsync, map->vsync);
+	PSB_WVDC32(pipe->src, map->src);
+	PSB_WVDC32(pipe->status, map->status);
 
 	/*set up the plane*/
-	PSB_WVDC32(dspstride_val, dspstride_reg);
-	PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
-	PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
-	PSB_WVDC32(dspsize_val, dspsize_reg);
-	PSB_WVDC32(dsppos_val, dsppos_reg);
-	PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
-	if (pipe == 1) {
+	PSB_WVDC32(pipe->stride, map->stride);
+	PSB_WVDC32(pipe->linoff, map->linoff);
+	PSB_WVDC32(pipe->tileoff, map->tileoff);
+	PSB_WVDC32(pipe->size, map->size);
+	PSB_WVDC32(pipe->pos, map->pos);
+	PSB_WVDC32(pipe->surf, map->surf);
+
+	if (pipenum == 1) {
 		/* restore palette (gamma) */
 		/*DRM_UDELAY(50000); */
 		for (i = 0; i < 256; i++)
-			PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+			PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
 
 		PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
 		PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 		/*TODO: resume pipe*/
 
 		/*enable the plane*/
-		PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+		PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
 
 		return 0;
 	}
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 
 	/*setup MIPI adapter + MIPI IP registers*/
 	if (dsi_config)
-		mdfld_dsi_controller_init(dsi_config, pipe);
+		mdfld_dsi_controller_init(dsi_config, pipenum);
 
 	if (in_atomic() || in_interrupt())
 		mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 		msleep(20);
 
 	/*enable the plane*/
-	PSB_WVDC32(dspcntr_val, dspcntr_reg);
+	PSB_WVDC32(pipe->cntr, map->cntr);
 
 	if (in_atomic() || in_interrupt())
 		mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
 	mdelay(1);
 
 	/*enable the pipe*/
-	PSB_WVDC32(pipeconf_val, pipeconf_reg);
+	PSB_WVDC32(pipe->conf, map->conf);
 
 	/* restore palette (gamma) */
 	/*DRM_UDELAY(50000); */
 	for (i = 0; i < 256; i++)
-		PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+		PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
 
 	return 0;
 }
@@ -667,6 +443,87 @@ static int mdfld_power_up(struct drm_device *dev)
 	return 0;
 }
 
+/* Medfield  */
+static const struct psb_offset mdfld_regmap[3] = {
+	{
+		.fp0 = MRST_FPA0,
+		.fp1 = MRST_FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.surf = DSPASURF,
+		.addr = MRST_DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = MDFLD_DPLL_DIV0,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = MDFLD_DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.surf = DSPBSURF,
+		.addr = MRST_DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	},
+	{
+		.fp0 = MRST_FPA0,	/* This is what the old code did ?? */
+		.cntr = DSPCCNTR,
+		.conf = PIPECCONF,
+		.src = PIPECSRC,
+		/* No DPLL_C */
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_C,
+		.hblank = HBLANK_C,
+		.hsync = HSYNC_C,
+		.vtotal = VTOTAL_C,
+		.vblank = VBLANK_C,
+		.vsync = VSYNC_C,
+		.stride = DSPCSTRIDE,
+		.size = DSPBSIZE,
+		.size = DSPCSIZE,
+		.surf = DSPCSURF,
+		.addr = MDFLD_DSPCBASE,
+		.status = PIPECSTAT,
+		.linoff = DSPCLINOFF,
+		.tileoff = DSPCTILEOFF,
+		.palette = PALETTE_C,
+	},
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+	dev_priv->regmap = mdfld_regmap;
+	return mid_chip_setup(dev);
+}
+
 const struct psb_ops mdfld_chip_ops = {
 	.name = "mdfld",
 	.accel_2d = 0,
@@ -676,7 +533,7 @@ const struct psb_ops mdfld_chip_ops = {
 	.hdmi_mask = (1 << 1),
 	.sgx_offset = MRST_SGX_OFFSET,
 
-	.chip_setup = mid_chip_setup,
+	.chip_setup = mdfld_chip_setup,
 	.crtc_helper = &mdfld_helper_funcs,
 	.crtc_funcs = &psb_intel_crtc_funcs,
 

+ 9 - 15
drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c

@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
 	struct mdfld_dsi_config *dsi_config =
 				mdfld_dsi_get_config(dsi_connector);
 	struct drm_device *dev = dsi_config->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 mipi_val = 0;
 
 	if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
 	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
 
 	/*init regs*/
-	if (pipe == 0) {
-		pkg_sender->dpll_reg = MRST_DPLL_A;
-		pkg_sender->dspcntr_reg = DSPACNTR;
-		pkg_sender->pipeconf_reg = PIPEACONF;
-		pkg_sender->dsplinoff_reg = DSPALINOFF;
-		pkg_sender->dspsurf_reg = DSPASURF;
-		pkg_sender->pipestat_reg = PIPEASTAT;
-	} else if (pipe == 2) {
-		pkg_sender->dpll_reg = MRST_DPLL_A;
-		pkg_sender->dspcntr_reg = DSPCCNTR;
-		pkg_sender->pipeconf_reg = PIPECCONF;
-		pkg_sender->dsplinoff_reg = DSPCLINOFF;
-		pkg_sender->dspsurf_reg = DSPCSURF;
-		pkg_sender->pipestat_reg = PIPECSTAT;
-	}
+	/* FIXME: should just copy the regmap ptr ? */
+	pkg_sender->dpll_reg = map->dpll;
+	pkg_sender->dspcntr_reg = map->cntr;
+	pkg_sender->pipeconf_reg = map->conf;
+	pkg_sender->dsplinoff_reg = map->linoff;
+	pkg_sender->dspsurf_reg = map->surf;
+	pkg_sender->pipestat_reg = map->status;
 
 	pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
 	pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);

+ 102 - 228
drivers/gpu/drm/gma500/mdfld_intel_display.c

@@ -50,17 +50,14 @@ struct mrst_clock_t {
 
 void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
 {
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int count, temp;
-	u32 pipeconf_reg = PIPEACONF;
 
 	switch (pipe) {
 	case 0:
-		break;
 	case 1:
-		pipeconf_reg = PIPEBCONF;
-		break;
 	case 2:
-		pipeconf_reg = PIPECCONF;
 		break;
 	default:
 		DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
 
 	/* Wait for for the pipe disable to take effect. */
 	for (count = 0; count < COUNT_MAX; count++) {
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_PIPE_STATE) == 0)
 			break;
 	}
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
 
 void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
 {
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int count, temp;
-	u32 pipeconf_reg = PIPEACONF;
 
 	switch (pipe) {
 	case 0:
-		break;
 	case 1:
-		pipeconf_reg = PIPEBCONF;
-		break;
 	case 2:
-		pipeconf_reg = PIPECCONF;
 		break;
 	default:
 		DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
 
 	/* Wait for for the pipe enable to take effect. */
 	for (count = 0; count < COUNT_MAX; count++) {
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_PIPE_STATE) == 1)
 			break;
 	}
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 				struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_i915_master_private *master_priv; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
-	int dsplinoff = DSPALINOFF;
-	int dspsurf = DSPASURF;
-	int dspstride = DSPASTRIDE;
-	int dspcntr_reg = DSPACNTR;
 	u32 dspcntr;
 	int ret;
 
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	if (ret)
 		return ret;
 
-	switch (pipe) {
-	case 0:
-		dsplinoff = DSPALINOFF;
-		break;
-	case 1:
-		dsplinoff = DSPBLINOFF;
-		dspsurf = DSPBSURF;
-		dspstride = DSPBSTRIDE;
-		dspcntr_reg = DSPBCNTR;
-		break;
-	case 2:
-		dsplinoff = DSPCLINOFF;
-		dspsurf = DSPCSURF;
-		dspstride = DSPCSTRIDE;
-		dspcntr_reg = DSPCCNTR;
-		break;
-	default:
+	if (pipe > 2) {
 		DRM_ERROR("Illegal Pipe Number.\n");
 		return -EINVAL;
 	}
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	start = psbfb->gtt->offset;
 	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-	REG_WRITE(dspstride, crtc->fb->pitches[0]);
-	dspcntr = REG_READ(dspcntr_reg);
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+	dspcntr = REG_READ(map->cntr);
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
 	switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
 		break;
 	}
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 
 	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
 						start, offset, x, y);
-	REG_WRITE(dsplinoff, offset);
-	REG_READ(dsplinoff);
-	REG_WRITE(dspsurf, start);
-	REG_READ(dspsurf);
+	REG_WRITE(map->linoff, offset);
+	REG_READ(map->linoff);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
 
 	gma_power_end(dev);
 
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
  */
 void mdfld_disable_crtc(struct drm_device *dev, int pipe)
 {
-	int dpll_reg = MRST_DPLL_A;
-	int dspcntr_reg = DSPACNTR;
-	int dspbase_reg = MRST_DSPABASE;
-	int pipeconf_reg = PIPEACONF;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
 	dev_dbg(dev->dev, "pipe = %d\n", pipe);
 
 
-	switch (pipe) {
-	case 0:
-		break;
-	case 1:
-		dpll_reg = MDFLD_DPLL_B;
-		dspcntr_reg = DSPBCNTR;
-		dspbase_reg = DSPBSURF;
-		pipeconf_reg = PIPEBCONF;
-		break;
-	case 2:
-		dpll_reg = MRST_DPLL_A;
-		dspcntr_reg = DSPCCNTR;
-		dspbase_reg = MDFLD_DSPCBASE;
-		pipeconf_reg = PIPECCONF;
-		break;
-	default:
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return;
-	}
-
 	if (pipe != 1)
 		mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
 				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
 
 	/* Disable display plane */
-	temp = REG_READ(dspcntr_reg);
+	temp = REG_READ(map->cntr);
 	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-		REG_WRITE(dspcntr_reg,
+		REG_WRITE(map->cntr,
 			  temp & ~DISPLAY_PLANE_ENABLE);
 		/* Flush the plane changes */
-		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-		REG_READ(dspbase_reg);
+		REG_WRITE(map->base, REG_READ(map->base));
+		REG_READ(map->base);
 	}
 
 	/* FIXME_JLIU7 MDFLD_PO revisit */
 
 	/* Next, disable display pipes */
-	temp = REG_READ(pipeconf_reg);
+	temp = REG_READ(map->conf);
 	if ((temp & PIPEACONF_ENABLE) != 0) {
 		temp &= ~PIPEACONF_ENABLE;
 		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
-		REG_WRITE(pipeconf_reg, temp);
-		REG_READ(pipeconf_reg);
+		REG_WRITE(map->conf, temp);
+		REG_READ(map->conf);
 
 		/* Wait for for the pipe disable to take effect. */
 		mdfldWaitForPipeDisable(dev, pipe);
 	}
 
-	temp = REG_READ(dpll_reg);
+	temp = REG_READ(map->dpll);
 	if (temp & DPLL_VCO_ENABLE) {
 		if ((pipe != 1 &&
 			!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
 				& PIPEACONF_ENABLE)) || pipe == 1) {
 			temp &= ~(DPLL_VCO_ENABLE);
-			REG_WRITE(dpll_reg, temp);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to turn off. */
 			/* FIXME_MDFLD PO may need more delay */
 			udelay(500);
 
 			if (!(temp & MDFLD_PWR_GATE_EN)) {
 				/* gating power of DPLL */
-				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+				REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
 				/* FIXME_MDFLD PO - change 500 to 1 after PO */
 				udelay(5000);
 			}
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
-	int dpll_reg = MRST_DPLL_A;
-	int dspcntr_reg = DSPACNTR;
-	int dspbase_reg = MRST_DSPABASE;
-	int pipeconf_reg = PIPEACONF;
-	u32 pipestat_reg = PIPEASTAT;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 pipeconf = dev_priv->pipeconf[pipe];
 	u32 temp;
 	int timeout = 0;
 
 	dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
 
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
-	switch (pipe) {
-	case 0:
-		break;
-	case 1:
-		dpll_reg = DPLL_B;
-		dspcntr_reg = DSPBCNTR;
-		dspbase_reg = MRST_DSPBBASE;
-		pipeconf_reg = PIPEBCONF;
-		dpll_reg = MDFLD_DPLL_B;
-		break;
-	case 2:
-		dpll_reg = MRST_DPLL_A;
-		dspcntr_reg = DSPCCNTR;
-		dspbase_reg = MDFLD_DSPCBASE;
-		pipeconf_reg = PIPECCONF;
-		pipestat_reg = PIPECSTAT;
-		break;
-	default:
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return;
-	}
+	/* Note: Old code uses pipe a stat for pipe b but that appears
+	   to be a bug */
 
 	if (!gma_power_begin(dev, true))
 		return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		/* Enable the DPLL */
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
 			/* When ungating power of DPLL, needs to wait 0.5us
 			   before enable the VCO */
 			if (temp & MDFLD_PWR_GATE_EN) {
 				temp &= ~MDFLD_PWR_GATE_EN;
-				REG_WRITE(dpll_reg, temp);
+				REG_WRITE(map->dpll, temp);
 				/* FIXME_MDFLD PO - change 500 to 1 after PO */
 				udelay(500);
 			}
 
-			REG_WRITE(dpll_reg, temp);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
 			/* FIXME_MDFLD PO - change 500 to 1 after PO */
 			udelay(500);
 
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 
 			/**
 			 * wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 			 * since both MIPI pipes share the same PLL.
 			 */
 			while ((pipe != 2) && (timeout < 20000) &&
-			  !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+			  !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
 				udelay(150);
 				timeout++;
 			}
 		}
 
 		/* Enable the plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				temp | DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			REG_WRITE(map->base, REG_READ(map->base));
 		}
 
 		/* Enable the pipe */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) == 0) {
-			REG_WRITE(pipeconf_reg, pipeconf);
+			REG_WRITE(map->conf, pipeconf);
 
 			/* Wait for for the pipe enable to take effect. */
 			mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 		/*workaround for sighting 3741701 Random X blank display*/
 		/*perform w/a in video mode only on pipe A or C*/
 		if (pipe == 0 || pipe == 2) {
-			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+			REG_WRITE(map->status, REG_READ(map->status));
 			msleep(100);
-			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+			if (PIPE_VBLANK_STATUS & REG_READ(map->status))
 				dev_dbg(dev->dev, "OK");
 			else {
 				dev_dbg(dev->dev, "STUCK!!!!");
 				/*shutdown controller*/
-				temp = REG_READ(dspcntr_reg);
-				REG_WRITE(dspcntr_reg,
+				temp = REG_READ(map->cntr);
+				REG_WRITE(map->cntr,
 						temp & ~DISPLAY_PLANE_ENABLE);
-				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				REG_WRITE(map->base, REG_READ(map->base));
 				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
 				REG_WRITE(0xb048, 1);
 				msleep(100);
-				temp = REG_READ(pipeconf_reg);
+				temp = REG_READ(map->conf);
 				temp &= ~PIPEACONF_ENABLE;
-				REG_WRITE(pipeconf_reg, temp);
+				REG_WRITE(map->conf, temp);
 				msleep(100); /*wait for pipe disable*/
 				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
 				msleep(100);
 				REG_WRITE(0xb004, REG_READ(0xb004));
 				/* try to bring the controller back up again*/
 				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
-				temp = REG_READ(dspcntr_reg);
-				REG_WRITE(dspcntr_reg,
+				temp = REG_READ(map->cntr);
+				REG_WRITE(map->cntr,
 						temp | DISPLAY_PLANE_ENABLE);
-				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+				REG_WRITE(map->base, REG_READ(map->base));
 				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
 				REG_WRITE(0xb048, 2);
 				msleep(100);
-				temp = REG_READ(pipeconf_reg);
+				temp = REG_READ(map->conf);
 				temp |= PIPEACONF_ENABLE;
-				REG_WRITE(pipeconf_reg, temp);
+				REG_WRITE(map->conf, temp);
 			}
 		}
 
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
 		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
 
 		/* Disable display plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp & ~DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-			REG_READ(dspbase_reg);
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
 		}
 
 		/* Next, disable display pipes */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
 			temp &= ~PIPEACONF_ENABLE;
 			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
-			REG_WRITE(pipeconf_reg, temp);
-			REG_READ(pipeconf_reg);
+			REG_WRITE(map->conf, temp);
+			REG_READ(map->conf);
 
 			/* Wait for for the pipe disable to take effect. */
 			mdfldWaitForPipeDisable(dev, pipe);
 		}
 
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if (temp & DPLL_VCO_ENABLE) {
 			if ((pipe != 1 && !((REG_READ(PIPEACONF)
 				| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
 					|| pipe == 1) {
 				temp &= ~(DPLL_VCO_ENABLE);
-				REG_WRITE(dpll_reg, temp);
-				REG_READ(dpll_reg);
+				REG_WRITE(map->dpll, temp);
+				REG_READ(map->dpll);
 				/* Wait for the clocks to turn off. */
 				/* FIXME_MDFLD PO may need more delay */
 				udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	int pipe = psb_intel_crtc->pipe;
-	int fp_reg = MRST_FPA0;
-	int dpll_reg = MRST_DPLL_A;
-	int dspcntr_reg = DSPACNTR;
-	int pipeconf_reg = PIPEACONF;
-	int htot_reg = HTOTAL_A;
-	int hblank_reg = HBLANK_A;
-	int hsync_reg = HSYNC_A;
-	int vtot_reg = VTOTAL_A;
-	int vblank_reg = VBLANK_A;
-	int vsync_reg = VSYNC_A;
-	int dspsize_reg = DSPASIZE;
-	int dsppos_reg = DSPAPOS;
-	int pipesrc_reg = PIPEASRC;
-	u32 *pipeconf = &dev_priv->pipeconf[pipe];
-	u32 *dspcntr = &dev_priv->dspcntr[pipe];
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
 	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
 								clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	}
 #endif
 
-	switch (pipe) {
-	case 0:
-		break;
-	case 1:
-		fp_reg = FPB0;
-		dpll_reg = DPLL_B;
-		dspcntr_reg = DSPBCNTR;
-		pipeconf_reg = PIPEBCONF;
-		htot_reg = HTOTAL_B;
-		hblank_reg = HBLANK_B;
-		hsync_reg = HSYNC_B;
-		vtot_reg = VTOTAL_B;
-		vblank_reg = VBLANK_B;
-		vsync_reg = VSYNC_B;
-		dspsize_reg = DSPBSIZE;
-		dsppos_reg = DSPBPOS;
-		pipesrc_reg = PIPEBSRC;
-		fp_reg = MDFLD_DPLL_DIV0;
-		dpll_reg = MDFLD_DPLL_B;
-		break;
-	case 2:
-		dpll_reg = MRST_DPLL_A;
-		dspcntr_reg = DSPCCNTR;
-		pipeconf_reg = PIPECCONF;
-		htot_reg = HTOTAL_C;
-		hblank_reg = HBLANK_C;
-		hsync_reg = HSYNC_C;
-		vtot_reg = VTOTAL_C;
-		vblank_reg = VBLANK_C;
-		vsync_reg = VSYNC_C;
-		dspsize_reg = DSPCSIZE;
-		dsppos_reg = DSPCPOS;
-		pipesrc_reg = PIPECSRC;
-		break;
-	default:
-		DRM_ERROR("Illegal Pipe Number.\n");
-		return 0;
-	}
-
 	ret = check_fb(crtc->fb);
 	if (ret)
 		return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 		 * contained within the displayable area of the screen image
 		 * (frame buffer).
 		 */
-		REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+		REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
 				| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
 		/* Set the CRTC with encoder mode. */
-		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+		REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
 				 | (mode->crtc_vdisplay - 1));
 	} else {
-		REG_WRITE(dspsize_reg,
+		REG_WRITE(map->size,
 				((mode->crtc_vdisplay - 1) << 16) |
 						(mode->crtc_hdisplay - 1));
-		REG_WRITE(pipesrc_reg,
+		REG_WRITE(map->src,
 				((mode->crtc_hdisplay - 1) << 16) |
 						(mode->crtc_vdisplay - 1));
 	}
 
-	REG_WRITE(dsppos_reg, 0);
+	REG_WRITE(map->pos, 0);
 
 	if (psb_intel_encoder)
 		drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 		offsetY = (adjusted_mode->crtc_vdisplay -
 					mode->crtc_vdisplay) / 2;
 
-		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
 			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
 			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
 								offsetX - 1) |
 			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
 								offsetX - 1) |
 			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
 								offsetY - 1) |
 			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
 								offsetY - 1) |
 			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
 	} else {
-		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
 			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
 			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
 			((adjusted_mode->crtc_hblank_end - 1) << 16));
-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
 			((adjusted_mode->crtc_hsync_end - 1) << 16));
-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
 			((adjusted_mode->crtc_vblank_end - 1) << 16));
-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
 			((adjusted_mode->crtc_vsync_end - 1) << 16));
 	}
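
The timing writes above all share one encoding: the hardware takes zero-based values, with the active/start count in the low 16 bits and the total/end count in the high 16 bits. A small self-contained sketch of that packing; pack_timing() is a made-up helper, not a driver function, and the mode numbers are just an example.

#include <stdint.h>
#include <stdio.h>

/* Pack a pipe timing register: low word = active - 1, high word = total - 1,
 * matching the REG_WRITE(map->htotal, ...) expressions in the hunk above. */
static uint32_t pack_timing(uint32_t active, uint32_t total)
{
	return (active - 1) | ((total - 1) << 16);
}

int main(void)
{
	/* e.g. 1024 active pixels out of 1344 total per line */
	uint32_t htotal = pack_timing(1024, 1344);

	printf("HTOTAL = 0x%08x (active=%u, total=%u)\n",
	       (unsigned)htotal, (unsigned)(htotal & 0xffffu) + 1,
	       (unsigned)(htotal >> 16) + 1);
	return 0;
}
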
 
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	/* setup pipeconf */
-	*pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+	dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
 
 	/* Set up the display plane register */
-	*dspcntr = REG_READ(dspcntr_reg);
-	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
-	*dspcntr |= DISPLAY_PLANE_ENABLE;
+	dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+	dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+	dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
 
 	if (is_mipi2)
 		goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 					clock.p1, m_conv);
 		}
 
-		dpll = REG_READ(dpll_reg);
+		dpll = REG_READ(map->dpll);
 
 		if (dpll & DPLL_VCO_ENABLE) {
 			dpll &= ~DPLL_VCO_ENABLE;
-			REG_WRITE(dpll_reg, dpll);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, dpll);
+			REG_READ(map->dpll);
 
 			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
 			/* FIXME_MDFLD PO - change 500 to 1 after PO */
 			udelay(500);
 
 			/* reset M1, N1 & P1 */
-			REG_WRITE(fp_reg, 0);
+			REG_WRITE(map->fp0, 0);
 			dpll &= ~MDFLD_P1_MASK;
-			REG_WRITE(dpll_reg, dpll);
+			REG_WRITE(map->dpll, dpll);
 			/* FIXME_MDFLD PO - change 500 to 1 after PO */
 			udelay(500);
 		}
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 		 * enable the VCO */
 		if (dpll & MDFLD_PWR_GATE_EN) {
 			dpll &= ~MDFLD_PWR_GATE_EN;
-			REG_WRITE(dpll_reg, dpll);
+			REG_WRITE(map->dpll, dpll);
 			/* FIXME_MDFLD PO - change 500 to 1 after PO */
 			udelay(500);
 		}
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 		fp = 0x000000c1;
 	}
 
-	REG_WRITE(fp_reg, fp);
-	REG_WRITE(dpll_reg, dpll);
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
 	/* FIXME_MDFLD PO - change 500 to 1 after PO */
 	udelay(500);
 
 	dpll |= DPLL_VCO_ENABLE;
-	REG_WRITE(dpll_reg, dpll);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
 
 	/* wait for DSI PLL to lock */
 	while (timeout < 20000 &&
-			!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+			!(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
 		udelay(150);
 		timeout++;
 	}
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 
 	dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
 
-	REG_WRITE(pipeconf_reg, *pipeconf);
-	REG_READ(pipeconf_reg);
+	REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+	REG_READ(map->conf);
 
 	/* Wait for the pipe enable to take effect. */
-	REG_WRITE(dspcntr_reg, *dspcntr);
+	REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
 	psb_intel_wait_for_vblank(dev);
 
 mrst_crtc_mode_set_exit:
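
The mdfld_crtc_mode_set() hunks above show the core of this series: the long switch (pipe) that picked register constants is replaced by indexing a per-pipe offset table, dev_priv->regmap[pipe]. A minimal stand-alone sketch of the pattern follows; the struct fields mirror struct psb_offset, but the offsets and the REG_WRITE() macro here are placeholders, not the real driver definitions.

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for struct psb_offset: one entry per display pipe. */
struct pipe_regs {
	uint32_t dpll;
	uint32_t conf;
	uint32_t htotal;
};

/* Placeholder offsets -- the real values come from psb_intel_reg.h. */
static const struct pipe_regs regmap[3] = {
	{ .dpll = 0x6014, .conf = 0x70008, .htotal = 0x60000 },
	{ .dpll = 0x6018, .conf = 0x71008, .htotal = 0x61000 },
	{ .dpll = 0x601c, .conf = 0x72008, .htotal = 0x62000 },
};

#define REG_WRITE(reg, val) \
	printf("write 0x%05x = 0x%08x\n", (unsigned)(reg), (unsigned)(val))

static void mode_set_sketch(int pipe, uint32_t htotal)
{
	const struct pipe_regs *map = &regmap[pipe];

	/* No per-register ternaries or switch (pipe) blocks needed. */
	REG_WRITE(map->htotal, htotal);
	REG_WRITE(map->conf, 1u << 31);	/* e.g. a pipe-enable bit */
}

int main(void)
{
	mode_set_sketch(1, (800 - 1) | ((1056 - 1) << 16));
	return 0;
}
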

+ 60 - 74
drivers/gpu/drm/gma500/oaktrail_crtc.c

@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
 static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
 	if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		/* Enable the DPLL */
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			REG_WRITE(dpll_reg, temp);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
 		}
 		/* Enable the pipe */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) == 0)
-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
 		/* Enable the plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp | DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			REG_WRITE(map->base, REG_READ(map->base));
 		}
 
 		psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 		/* Disable the VGA plane that we never use */
 		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
 		/* Disable display plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp & ~DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-			REG_READ(dspbase_reg);
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
 		}
 
 		/* Next, disable display pipes */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			REG_READ(pipeconf_reg);
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
 		}
 		/* Wait for the pipe disable to take effect. */
 		psb_intel_wait_for_vblank(dev);
 
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 		}
 
 		/* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	int pipe = psb_intel_crtc->pipe;
-	int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
-	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
 	struct oaktrail_clock_t clock;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	if (oaktrail_panel_fitter_pipe(dev) == pipe)
 		REG_WRITE(PFIT_CONTROL, 0);
 
-	REG_WRITE(pipesrc_reg,
+	REG_WRITE(map->src,
 		  ((mode->crtc_hdisplay - 1) << 16) |
 		  (mode->crtc_vdisplay - 1));
 
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 		offsetY = (adjusted_mode->crtc_vdisplay -
 			   mode->crtc_vdisplay) / 2;
 
-		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
 			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
 			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(hblank_reg,
+		REG_WRITE(map->hblank,
 			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
 			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
-		REG_WRITE(hsync_reg,
+		REG_WRITE(map->hsync,
 			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
 			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
-		REG_WRITE(vblank_reg,
+		REG_WRITE(map->vblank,
 			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
 			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
-		REG_WRITE(vsync_reg,
+		REG_WRITE(map->vsync,
 			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
 			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
 	} else {
-		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
 			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
 			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
 			((adjusted_mode->crtc_hblank_end - 1) << 16));
-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
 			((adjusted_mode->crtc_hsync_end - 1) << 16));
-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
 			((adjusted_mode->crtc_vblank_end - 1) << 16));
-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
 			((adjusted_mode->crtc_vsync_end - 1) << 16));
 	}
 
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	/* setup pipeconf */
-	pipeconf = REG_READ(pipeconf_reg);
+	pipeconf = REG_READ(map->conf);
 
 	/* Set up the display plane register */
-	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr = REG_READ(map->cntr);
 	dspcntr |= DISPPLANE_GAMMA_ENABLE;
 
 	if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	mrstPrintPll("chosen", &clock);
 
 	if (dpll & DPLL_VCO_ENABLE) {
-		REG_WRITE(fp_reg, fp);
-		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-		REG_READ(dpll_reg);
+		REG_WRITE(map->fp0, fp);
+		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
 		/* Check the DPLLA lock bit PIPEACONF[29] */
 		udelay(150);
 	}
 
-	REG_WRITE(fp_reg, fp);
-	REG_WRITE(dpll_reg, dpll);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
 	/* Wait for the clocks to stabilize. */
 	udelay(150);
 
 	/* write it again -- the BIOS does, after all */
-	REG_WRITE(dpll_reg, dpll);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
 	/* Wait for the clocks to stabilize. */
 	udelay(150);
 
-	REG_WRITE(pipeconf_reg, pipeconf);
-	REG_READ(pipeconf_reg);
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
 	psb_intel_wait_for_vblank(dev);
 
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 	psb_intel_wait_for_vblank(dev);
 
 oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
 
-	int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr;
 	int ret = 0;
 
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
 	start = psbfb->gtt->offset;
 	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-	REG_WRITE(dspstride, crtc->fb->pitches[0]);
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
 
-	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr = REG_READ(map->cntr);
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
 	switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
 		ret = -EINVAL;
 		goto pipe_set_base_exit;
 	}
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 
-	REG_WRITE(dspbase, offset);
-	REG_READ(dspbase);
-	REG_WRITE(dspsurf, start);
-	REG_READ(dspsurf);
+	REG_WRITE(map->base, offset);
+	REG_READ(map->base);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
 
 pipe_set_base_exit:
 	gma_power_end(dev);
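
oaktrail_pipe_set_base() keeps the same base/offset arithmetic while taking the plane registers from the map. The offset of the first visible pixel is the usual linear-framebuffer computation; a self-contained sketch is below, where struct fb is a trimmed stand-in rather than the real drm_framebuffer.

#include <stdint.h>
#include <stdio.h>

struct fb {				/* trimmed stand-in for drm_framebuffer */
	uint32_t pitch;			/* bytes per scanline */
	uint32_t bpp;			/* bits per pixel */
};

/* Byte offset of pixel (x, y) from the start of the framebuffer, matching
 * the "y * pitches[0] + x * (bits_per_pixel / 8)" computation above. */
static unsigned long pan_offset(const struct fb *fb, int x, int y)
{
	return (unsigned long)y * fb->pitch + (unsigned long)x * (fb->bpp / 8);
}

int main(void)
{
	struct fb fb = { .pitch = 1366 * 4, .bpp = 32 };

	printf("offset of (10, 20): %lu bytes\n", pan_offset(&fb, 10, 20));
	return 0;
}
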

+ 91 - 35
drivers/gpu/drm/gma500/oaktrail_device.c

@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_save_area *regs = &dev_priv->regs;
+	struct psb_pipe *p = &regs->pipe[0];
 	int i;
 	u32 pp_stat;
 
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
 	regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
 
 	/* Pipe & plane A info */
-	regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
-	regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
-	regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
-	regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
-	regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
-	regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
-	regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
-	regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
-	regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
-	regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
-	regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+	p->conf = PSB_RVDC32(PIPEACONF);
+	p->src = PSB_RVDC32(PIPEASRC);
+	p->fp0 = PSB_RVDC32(MRST_FPA0);
+	p->fp1 = PSB_RVDC32(MRST_FPA1);
+	p->dpll = PSB_RVDC32(MRST_DPLL_A);
+	p->htotal = PSB_RVDC32(HTOTAL_A);
+	p->hblank = PSB_RVDC32(HBLANK_A);
+	p->hsync = PSB_RVDC32(HSYNC_A);
+	p->vtotal = PSB_RVDC32(VTOTAL_A);
+	p->vblank = PSB_RVDC32(VBLANK_A);
+	p->vsync = PSB_RVDC32(VSYNC_A);
 	regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
-	regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
-	regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
-	regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
-	regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
-	regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
-	regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+	p->cntr = PSB_RVDC32(DSPACNTR);
+	p->stride = PSB_RVDC32(DSPASTRIDE);
+	p->addr = PSB_RVDC32(DSPABASE);
+	p->surf = PSB_RVDC32(DSPASURF);
+	p->linoff = PSB_RVDC32(DSPALINOFF);
+	p->tileoff = PSB_RVDC32(DSPATILEOFF);
 
 	/* Save cursor regs */
 	regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
 
 	/* Save palette (gamma) */
 	for (i = 0; i < 256; i++)
-		regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+		p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
 
 	if (dev_priv->hdmi_priv)
 		oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_save_area *regs = &dev_priv->regs;
+	struct psb_pipe *p = &regs->pipe[0];
 	u32 pp_stat;
 	int i;
 
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
 	PSB_WVDC32(0x80000000, VGACNTRL);
 
 	/* set the plls */
-	PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
-	PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
+	PSB_WVDC32(p->fp0, MRST_FPA0);
+	PSB_WVDC32(p->fp1, MRST_FPA1);
 
 	/* Actually enable it */
-	PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
+	PSB_WVDC32(p->dpll, MRST_DPLL_A);
 	DRM_UDELAY(150);
 
 	/* Restore mode */
-	PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
-	PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
-	PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
-	PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
-	PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
-	PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
-	PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+	PSB_WVDC32(p->htotal, HTOTAL_A);
+	PSB_WVDC32(p->hblank, HBLANK_A);
+	PSB_WVDC32(p->hsync, HSYNC_A);
+	PSB_WVDC32(p->vtotal, VTOTAL_A);
+	PSB_WVDC32(p->vblank, VBLANK_A);
+	PSB_WVDC32(p->vsync, VSYNC_A);
+	PSB_WVDC32(p->src, PIPEASRC);
 	PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
 
 	/* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
 
 	/* Enable the pipe*/
 	if (dev_priv->iLVDS_enable)
-		PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
+		PSB_WVDC32(p->conf, PIPEACONF);
 
 	/* Set up the plane*/
-	PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
-	PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
-	PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
+	PSB_WVDC32(p->linoff, DSPALINOFF);
+	PSB_WVDC32(p->stride, DSPASTRIDE);
+	PSB_WVDC32(p->tileoff, DSPATILEOFF);
 
 	/* Enable the plane */
-	PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
-	PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
+	PSB_WVDC32(p->cntr, DSPACNTR);
+	PSB_WVDC32(p->surf, DSPASURF);
 
 	/* Enable Cursor A */
 	PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
 
 	/* Restore palette (gamma) */
 	for (i = 0; i < 256; i++)
-		PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
+		PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
 
 	if (dev_priv->hdmi_priv)
 		oaktrail_hdmi_restore(dev);
@@ -454,11 +456,65 @@ static int oaktrail_power_up(struct drm_device *dev)
 	return 0;
 }
 
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+	{
+		.fp0 = MRST_FPA0,
+		.fp1 = MRST_FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.surf = DSPASURF,
+		.addr = MRST_DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	},
+};
 
 static int oaktrail_chip_setup(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	int ret;
+	
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+	dev_priv->regmap = oaktrail_regmap;
 
 	ret = mid_chip_setup(dev);
 	if (ret < 0)
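
oaktrail_chip_setup() now both enables MSI (warning and falling back to the legacy interrupt on failure) and installs the chip's own register-offset table in dev_priv->regmap, so the shared CRTC code stays chip-agnostic. A sketch of the "pick the table once at setup" idea, with trimmed types and placeholder offsets that are not the real Oaktrail or Poulsbo values:

#include <stdint.h>
#include <stdio.h>

struct pipe_regs { uint32_t conf, dpll; };	/* trimmed register-offset entry */

struct drv_priv {
	const struct pipe_regs *regmap;		/* set once by the chip setup hook */
};

/* Placeholder offsets only. */
static const struct pipe_regs oaktrail_map[2] = {
	{ .conf = 0x70008, .dpll = 0x0f014 },
	{ .conf = 0x71008, .dpll = 0x06018 },
};
static const struct pipe_regs poulsbo_map[2] = {
	{ .conf = 0x70008, .dpll = 0x06014 },
	{ .conf = 0x71008, .dpll = 0x06018 },
};

/* Each chip's setup hook points the shared field at its own table, so the
 * common CRTC code can index regmap[pipe] without knowing the chip. */
static void chip_setup(struct drv_priv *priv, int is_oaktrail)
{
	priv->regmap = is_oaktrail ? oaktrail_map : poulsbo_map;
}

int main(void)
{
	struct drv_priv priv;

	chip_setup(&priv, 1);
	printf("pipe 1 dpll offset: 0x%05x\n", (unsigned)priv.regmap[1].dpll);
	return 0;
}
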

+ 31 - 29
drivers/gpu/drm/gma500/oaktrail_hdmi.c

@@ -434,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
 	struct psb_state *regs = &dev_priv->regs.psb;
+	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
 	int i;
 
 	/* dpll */
@@ -444,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
 	hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
 
 	/* pipe B */
-	regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
-	regs->savePIPEBSRC  = PSB_RVDC32(PIPEBSRC);
-	regs->saveHTOTAL_B  = PSB_RVDC32(HTOTAL_B);
-	regs->saveHBLANK_B  = PSB_RVDC32(HBLANK_B);
-	regs->saveHSYNC_B   = PSB_RVDC32(HSYNC_B);
-	regs->saveVTOTAL_B  = PSB_RVDC32(VTOTAL_B);
-	regs->saveVBLANK_B  = PSB_RVDC32(VBLANK_B);
-	regs->saveVSYNC_B   = PSB_RVDC32(VSYNC_B);
+	pipeb->conf = PSB_RVDC32(PIPEBCONF);
+	pipeb->src = PSB_RVDC32(PIPEBSRC);
+	pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+	pipeb->hblank = PSB_RVDC32(HBLANK_B);
+	pipeb->hsync = PSB_RVDC32(HSYNC_B);
+	pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+	pipeb->vblank = PSB_RVDC32(VBLANK_B);
+	pipeb->vsync = PSB_RVDC32(VSYNC_B);
 
 	hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
 	hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -463,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
 	hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
 
 	/* plane */
-	regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
-	regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
-	regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
-	regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
-	regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
-	regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+	pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+	pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+	pipeb->addr = PSB_RVDC32(DSPBBASE);
+	pipeb->surf = PSB_RVDC32(DSPBSURF);
+	pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+	pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
 
 	/* cursor B */
 	regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -477,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
 
 	/* save palette */
 	for (i = 0; i < 256; i++)
-		regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+		pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
 }
 
 /* restore HDMI register state */
@@ -486,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
 	struct psb_state *regs = &dev_priv->regs.psb;
+	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
 	int i;
 
 	/* dpll */
@@ -497,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
 	DRM_UDELAY(150);
 
 	/* pipe */
-	PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
-	PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
-	PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
-	PSB_WVDC32(regs->saveHSYNC_B,  HSYNC_B);
-	PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
-	PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
-	PSB_WVDC32(regs->saveVSYNC_B,  VSYNC_B);
+	PSB_WVDC32(pipeb->src, PIPEBSRC);
+	PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+	PSB_WVDC32(pipeb->hblank, HBLANK_B);
+	PSB_WVDC32(pipeb->hsync,  HSYNC_B);
+	PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+	PSB_WVDC32(pipeb->vblank, VBLANK_B);
+	PSB_WVDC32(pipeb->vsync,  VSYNC_B);
 
 	PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
 	PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -513,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
 	PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
 	PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
 
-	PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
+	PSB_WVDC32(pipeb->conf, PIPEBCONF);
 	PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
 
 	/* plane */
-	PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
-	PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
-	PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
-	PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
-	PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
+	PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+	PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+	PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+	PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+	PSB_WVDC32(pipeb->surf, DSPBSURF);
 
 	/* cursor B */
 	PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -530,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
 
 	/* restore palette */
 	for (i = 0; i < 256; i++)
-		PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
+		PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
 }
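
The HDMI suspend/resume path now keeps pipe-B state in the shared struct psb_pipe snapshot instead of ad-hoc save* fields. Below is a self-contained illustration of the save/restore round trip over a fake register file; the offsets are placeholders and RD/WR merely stand in for PSB_RVDC32/PSB_WVDC32.

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for struct psb_pipe: a software snapshot of one pipe. */
struct pipe_snapshot {
	uint32_t conf, src, htotal;
	uint32_t palette[256];
};

static uint32_t mmio[0x4000];			/* fake register file, word indexed */
#define RD(off)       (mmio[(off) / 4])
#define WR(off, val)  (mmio[(off) / 4] = (val))

/* Placeholder offsets; the real PIPEB and DSPB values live in psb_intel_reg.h. */
enum { PIPEB_CONF = 0x1008, PIPEB_SRC = 0x100c, HTOTAL = 0x1010, PALETTE = 0x2000 };

static void save_pipe(struct pipe_snapshot *p)
{
	int i;

	p->conf   = RD(PIPEB_CONF);
	p->src    = RD(PIPEB_SRC);
	p->htotal = RD(HTOTAL);
	for (i = 0; i < 256; i++)
		p->palette[i] = RD(PALETTE + (i << 2));
}

static void restore_pipe(const struct pipe_snapshot *p)
{
	int i;

	WR(PIPEB_SRC, p->src);
	WR(HTOTAL, p->htotal);
	WR(PIPEB_CONF, p->conf);	/* source and timings before re-enabling, as above */
	for (i = 0; i < 256; i++)
		WR(PALETTE + (i << 2), p->palette[i]);
}

int main(void)
{
	struct pipe_snapshot snap;

	WR(PIPEB_CONF, 0x80000000u);
	save_pipe(&snap);
	restore_pipe(&snap);
	printf("restored PIPEB_CONF = 0x%08x\n", (unsigned)RD(PIPEB_CONF));
	return 0;
}
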

+ 6 - 12
drivers/gpu/drm/gma500/opregion.c

@@ -21,10 +21,8 @@
  * DEALINGS IN THE SOFTWARE.
  *
  */
-#ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 #include <linux/acpi_io.h>
-#endif
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
 
@@ -151,7 +149,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct backlight_device *bd = dev_priv->backlight_device;
-	u32 max;
 
 	DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
 
@@ -165,11 +162,12 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	if (bclp > 255)
 		return ASLE_BACKLIGHT_FAILED;
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-	max = bd->props.max_brightness;
-	bd->props.brightness = bclp * max / 255;
-	backlight_update_status(bd);
-#endif
+	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+		int max = bd->props.max_brightness;
+		bd->props.brightness = bclp * max / 255;
+		backlight_update_status(bd);
+	}
+
 	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
 
 	return 0;
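
The opregion change swaps the #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE block for an if (config_enabled(...)) test, so the backlight branch is always compiled and simply discarded by the optimiser when the option is off. The rescaling itself is a linear map from the ACPI 0..255 request onto the backlight device's own range; a worked sketch, where scale_backlight() is a made-up helper:

#include <stdio.h>

/* ASLE passes a requested level in 0..255; the backlight class device has
 * its own max_brightness, so the driver rescales linearly as above. */
static int scale_backlight(int bclp, int max_brightness)
{
	if (bclp > 255)
		return -1;		/* the driver returns ASLE_BACKLIGHT_FAILED */
	return bclp * max_brightness / 255;
}

int main(void)
{
	printf("bclp 128 on a 0..100 device -> %d\n", scale_backlight(128, 100));
	printf("cblv percentage field -> %d\n", (128 * 0x64) / 0xff);
	return 0;
}
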
@@ -311,11 +309,7 @@ int psb_intel_opregion_setup(struct drm_device *dev)
 		return -ENOTSUPP;
 	}
 	DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
-#ifdef CONFIG_ACPI
 	base = acpi_os_ioremap(opregion_phy, 8*1024);
-#else
-	base = ioremap(opregion_phy, 8*1024);
-#endif
 	if (!base)
 		return -ENOMEM;
 

+ 21 - 1
drivers/gpu/drm/gma500/opregion.h

@@ -22,8 +22,28 @@
  *
  */
 
+#if defined(CONFIG_ACPI)
 extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
-extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
 extern void psb_intel_opregion_init(struct drm_device *dev);
 extern void psb_intel_opregion_fini(struct drm_device *dev);
 extern int psb_intel_opregion_setup(struct drm_device *dev);
+
+#else
+
+extern inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+}
+
+extern inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
+
+extern inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
+}
+
+extern inline int psb_intel_opregion_setup(struct drm_device *dev)
+{
+	return 0;
+}
+#endif
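
With the #ifdef moved out of opregion.c, opregion.h supplies do-nothing fallbacks when CONFIG_ACPI is off, so callers never need their own guards. A minimal stand-alone illustration of the stub pattern; this sketch uses static inline for the no-op variants and made-up names, it is not the driver's header.

#include <stdio.h>

#define HAVE_ACPI 0			/* emulate CONFIG_ACPI being disabled */

#if HAVE_ACPI
void opregion_init(void);		/* real implementation lives elsewhere */
#else
/* No-op stub: callers may call unconditionally, with no #ifdef at the
 * call site. */
static inline void opregion_init(void) { }
#endif

int main(void)
{
	opregion_init();		/* compiles either way */
	printf("opregion init done (HAVE_ACPI=%d)\n", HAVE_ACPI);
	return 0;
}
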

+ 64 - 0
drivers/gpu/drm/gma500/psb_device.c

@@ -289,8 +289,62 @@ static void psb_get_core_freq(struct drm_device *dev)
 	}
 }
 
+/* Poulsbo */
+static const struct psb_offset psb_regmap[2] = {
+	{
+		.fp0 = FPA0,
+		.fp1 = FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.base = DSPABASE,
+		.surf = DSPASURF,
+		.addr = DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.base = DSPBBASE,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	}
+};
+
 static int psb_chip_setup(struct drm_device *dev)
 {
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->regmap = psb_regmap;
 	psb_get_core_freq(dev);
 	gma_intel_setup_gmbus(dev);
 	psb_intel_opregion_init(dev);
@@ -298,8 +352,17 @@ static int psb_chip_setup(struct drm_device *dev)
 	return 0;
 }
 
+/* Not exactly an erratum more an irritation */
+static void psb_chip_errata(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	psb_lid_timer_init(dev_priv);
+}
+
 static void psb_chip_teardown(struct drm_device *dev)
 {
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	psb_lid_timer_takedown(dev_priv);
 	gma_intel_teardown_gmbus(dev);
 }
 
@@ -313,6 +376,7 @@ const struct psb_ops psb_chip_ops = {
 	.sgx_offset = PSB_SGX_OFFSET,
 	.chip_setup = psb_chip_setup,
 	.chip_teardown = psb_chip_teardown,
+	.errata = psb_chip_errata,
 
 	.crtc_helper = &psb_intel_helper_funcs,
 	.crtc_funcs = &psb_intel_crtc_funcs,
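
psb_device.c gains its own regmap table and a new errata hook that starts the lid timer, both reached through the per-chip ops structure. A trimmed sketch of that callback-table shape; struct chip_ops below is a stand-in for struct psb_ops, not its real layout.

#include <stdio.h>

struct dev;					/* opaque device handle */

/* Trimmed stand-in for struct psb_ops: per-chip hooks called by core code. */
struct chip_ops {
	int  (*chip_setup)(struct dev *d);
	void (*chip_teardown)(struct dev *d);
	void (*errata)(struct dev *d);		/* optional workaround hook */
};

static int  my_setup(struct dev *d)    { (void)d; puts("setup");    return 0; }
static void my_teardown(struct dev *d) { (void)d; puts("teardown"); }
static void my_errata(struct dev *d)   { (void)d; puts("errata: start lid timer"); }

static const struct chip_ops ops = {
	.chip_setup    = my_setup,
	.chip_teardown = my_teardown,
	.errata        = my_errata,
};

int main(void)
{
	struct dev *d = NULL;

	ops.chip_setup(d);
	if (ops.errata)				/* the hook is optional, so check */
		ops.errata(d);
	ops.chip_teardown(d);
	return 0;
}
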

+ 17 - 42
drivers/gpu/drm/gma500/psb_drv.c

@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 	{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
 	{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
 	{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
 #endif
 	{ 0, }
 };
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
 	return;
 }
 
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
 static int psb_do_init(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
 	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
 	    (stolen_gtt << PAGE_SHIFT) * 1024;
 
-	if (1 || drm_debug) {
-		uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
-		uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
-		DRM_INFO("SGX core id = 0x%08x\n", core_id);
-		DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
-			 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
-			 _PSB_CC_REVISION_MAJOR_SHIFT,
-			 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
-			 _PSB_CC_REVISION_MINOR_SHIFT);
-		DRM_INFO
-		    ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
-		     (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
-		     _PSB_CC_REVISION_MAINTENANCE_SHIFT,
-		     (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
-		     _PSB_CC_REVISION_DESIGNER_SHIFT);
-	}
-
-
 	spin_lock_init(&dev_priv->irqmask_lock);
 	spin_lock_init(&dev_priv->lock_2d);
 
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
 	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
 	return 0;
 out_err:
-	psb_do_takedown(dev);
 	return ret;
 }
 
@@ -214,17 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
 
 	/* Kill vblank etc here */
 
-	gma_backlight_exit(dev);
-	psb_modeset_cleanup(dev);
 
 	if (dev_priv) {
-		psb_intel_opregion_fini(dev);
-		psb_lid_timer_takedown(dev_priv);
+		if (dev_priv->backlight_device)
+			gma_backlight_exit(dev);
+		psb_modeset_cleanup(dev);
 
 		if (dev_priv->ops->chip_teardown)
 			dev_priv->ops->chip_teardown(dev);
-		psb_do_takedown(dev);
 
+		psb_intel_opregion_fini(dev);
 
 		if (dev_priv->pf_pd) {
 			psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
 			dev_priv->sgx_reg = NULL;
 		}
 
+		/* Destroy VBT data */
+		psb_intel_destroy_bios(dev);
+
 		kfree(dev_priv);
 		dev->dev_private = NULL;
-
-		/*destroy VBT data*/
-		psb_intel_destroy_bios(dev);
 	}
-
 	gma_power_uninit(dev);
-
 	return 0;
 }
 
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	pci_set_master(dev->pdev);
 
-	if (!IS_PSB(dev)) {
-		if (pci_enable_msi(dev->pdev))
-			dev_warn(dev->dev, "Enabling MSI failed!\n");
-	}
-
 	dev_priv->num_pipe = dev_priv->ops->pipes;
 
 	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -351,8 +328,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 	PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
 
 	acpi_video_register();
-	if (dev_priv->opregion.lid_state)
-		psb_lid_timer_init(dev_priv);
 
 	ret = drm_vblank_init(dev, dev_priv->num_pipe);
 	if (ret)
@@ -371,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
 	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-	if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_install(dev);
+
+	drm_irq_install(dev);
 
 	dev->vblank_disable_allowed = 1;
 

+ 63 - 111
drivers/gpu/drm/gma500/psb_drv.h

@@ -280,51 +280,73 @@ struct intel_gmbus {
 	u32 reg0;
 };
 
+/*
+ *	Register offset maps
+ */
+
+struct psb_offset {
+	u32	fp0;
+	u32	fp1;
+	u32	cntr;
+	u32	conf;
+	u32	src;
+	u32	dpll;
+	u32	dpll_md;
+	u32	htotal;
+	u32	hblank;
+	u32	hsync;
+	u32	vtotal;
+	u32	vblank;
+	u32	vsync;
+	u32	stride;
+	u32	size;
+	u32	pos;
+	u32	surf;
+	u32	addr;
+	u32	base;
+	u32	status;
+	u32	linoff;
+	u32	tileoff;
+	u32	palette;
+};
+
 /*
  *	Register save state. This is used to hold the context when the
  *	device is powered off. In the case of Oaktrail this can (but does not
  *	yet) include screen blank. Operations occurring during the save
  *	update the register cache instead.
  */
+
+/*
+ *	Common status for pipes.
+ */
+struct psb_pipe {
+	u32	fp0;
+	u32	fp1;
+	u32	cntr;
+	u32	conf;
+	u32	src;
+	u32	dpll;
+	u32	dpll_md;
+	u32	htotal;
+	u32	hblank;
+	u32	hsync;
+	u32	vtotal;
+	u32	vblank;
+	u32	vsync;
+	u32	stride;
+	u32	size;
+	u32	pos;
+	u32	base;
+	u32	surf;
+	u32	addr;
+	u32	status;
+	u32	linoff;
+	u32	tileoff;
+	u32	palette[256];
+};
+
 struct psb_state {
-	uint32_t saveDSPACNTR;
-	uint32_t saveDSPBCNTR;
-	uint32_t savePIPEACONF;
-	uint32_t savePIPEBCONF;
-	uint32_t savePIPEASRC;
-	uint32_t savePIPEBSRC;
-	uint32_t saveFPA0;
-	uint32_t saveFPA1;
-	uint32_t saveDPLL_A;
-	uint32_t saveDPLL_A_MD;
-	uint32_t saveHTOTAL_A;
-	uint32_t saveHBLANK_A;
-	uint32_t saveHSYNC_A;
-	uint32_t saveVTOTAL_A;
-	uint32_t saveVBLANK_A;
-	uint32_t saveVSYNC_A;
-	uint32_t saveDSPASTRIDE;
-	uint32_t saveDSPASIZE;
-	uint32_t saveDSPAPOS;
-	uint32_t saveDSPABASE;
-	uint32_t saveDSPASURF;
-	uint32_t saveDSPASTATUS;
-	uint32_t saveFPB0;
-	uint32_t saveFPB1;
-	uint32_t saveDPLL_B;
-	uint32_t saveDPLL_B_MD;
-	uint32_t saveHTOTAL_B;
-	uint32_t saveHBLANK_B;
-	uint32_t saveHSYNC_B;
-	uint32_t saveVTOTAL_B;
-	uint32_t saveVBLANK_B;
-	uint32_t saveVSYNC_B;
-	uint32_t saveDSPBSTRIDE;
-	uint32_t saveDSPBSIZE;
-	uint32_t saveDSPBPOS;
-	uint32_t saveDSPBBASE;
-	uint32_t saveDSPBSURF;
-	uint32_t saveDSPBSTATUS;
 	uint32_t saveVCLK_DIVISOR_VGA0;
 	uint32_t saveVCLK_DIVISOR_VGA1;
 	uint32_t saveVCLK_POST_DIV;
@@ -339,14 +361,8 @@ struct psb_state {
 	uint32_t savePP_CONTROL;
 	uint32_t savePP_CYCLE;
 	uint32_t savePFIT_CONTROL;
-	uint32_t savePaletteA[256];
-	uint32_t savePaletteB[256];
 	uint32_t saveCLOCKGATING;
 	uint32_t saveDSPARB;
-	uint32_t saveDSPATILEOFF;
-	uint32_t saveDSPBTILEOFF;
-	uint32_t saveDSPAADDR;
-	uint32_t saveDSPBADDR;
 	uint32_t savePFIT_AUTO_RATIOS;
 	uint32_t savePFIT_PGM_RATIOS;
 	uint32_t savePP_ON_DELAYS;
@@ -354,8 +370,6 @@ struct psb_state {
 	uint32_t savePP_DIVISOR;
 	uint32_t saveBCLRPAT_A;
 	uint32_t saveBCLRPAT_B;
-	uint32_t saveDSPALINOFF;
-	uint32_t saveDSPBLINOFF;
 	uint32_t savePERF_MODE;
 	uint32_t saveDSPFW1;
 	uint32_t saveDSPFW2;
@@ -370,8 +384,6 @@ struct psb_state {
 	uint32_t saveDSPBCURSOR_BASE;
 	uint32_t saveDSPACURSOR_POS;
 	uint32_t saveDSPBCURSOR_POS;
-	uint32_t save_palette_a[256];
-	uint32_t save_palette_b[256];
 	uint32_t saveOV_OVADD;
 	uint32_t saveOV_OGAMC0;
 	uint32_t saveOV_OGAMC1;
@@ -394,64 +406,7 @@ struct psb_state {
 };
 
 struct medfield_state {
-	uint32_t saveDPLL_A;
-	uint32_t saveFPA0;
-	uint32_t savePIPEACONF;
-	uint32_t saveHTOTAL_A;
-	uint32_t saveHBLANK_A;
-	uint32_t saveHSYNC_A;
-	uint32_t saveVTOTAL_A;
-	uint32_t saveVBLANK_A;
-	uint32_t saveVSYNC_A;
-	uint32_t savePIPEASRC;
-	uint32_t saveDSPASTRIDE;
-	uint32_t saveDSPALINOFF;
-	uint32_t saveDSPATILEOFF;
-	uint32_t saveDSPASIZE;
-	uint32_t saveDSPAPOS;
-	uint32_t saveDSPASURF;
-	uint32_t saveDSPACNTR;
-	uint32_t saveDSPASTATUS;
-	uint32_t save_palette_a[256];
 	uint32_t saveMIPI;
-
-	uint32_t saveDPLL_B;
-	uint32_t saveFPB0;
-	uint32_t savePIPEBCONF;
-	uint32_t saveHTOTAL_B;
-	uint32_t saveHBLANK_B;
-	uint32_t saveHSYNC_B;
-	uint32_t saveVTOTAL_B;
-	uint32_t saveVBLANK_B;
-	uint32_t saveVSYNC_B;
-	uint32_t savePIPEBSRC;
-	uint32_t saveDSPBSTRIDE;
-	uint32_t saveDSPBLINOFF;
-	uint32_t saveDSPBTILEOFF;
-	uint32_t saveDSPBSIZE;
-	uint32_t saveDSPBPOS;
-	uint32_t saveDSPBSURF;
-	uint32_t saveDSPBCNTR;
-	uint32_t saveDSPBSTATUS;
-	uint32_t save_palette_b[256];
-
-	uint32_t savePIPECCONF;
-	uint32_t saveHTOTAL_C;
-	uint32_t saveHBLANK_C;
-	uint32_t saveHSYNC_C;
-	uint32_t saveVTOTAL_C;
-	uint32_t saveVBLANK_C;
-	uint32_t saveVSYNC_C;
-	uint32_t savePIPECSRC;
-	uint32_t saveDSPCSTRIDE;
-	uint32_t saveDSPCLINOFF;
-	uint32_t saveDSPCTILEOFF;
-	uint32_t saveDSPCSIZE;
-	uint32_t saveDSPCPOS;
-	uint32_t saveDSPCSURF;
-	uint32_t saveDSPCCNTR;
-	uint32_t saveDSPCSTATUS;
-	uint32_t save_palette_c[256];
 	uint32_t saveMIPI_C;
 
 	uint32_t savePFIT_CONTROL;
@@ -480,6 +435,7 @@ struct cdv_state {
 };
 
 struct psb_save_area {
+	struct psb_pipe pipe[3];
 	uint32_t saveBSM;
 	uint32_t saveVBT;
 	union {
@@ -498,6 +454,7 @@ struct psb_ops;
 struct drm_psb_private {
 	struct drm_device *dev;
 	const struct psb_ops *ops;
+	const struct psb_offset *regmap;
 	
 	struct child_device_config *child_dev;
 	int child_dev_num;
@@ -550,6 +507,7 @@ struct drm_psb_private {
 	 * Modesetting
 	 */
 	struct psb_intel_mode_device mode_dev;
+	bool modeset;	/* true if we have done the mode_device setup */
 
 	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
 	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -807,12 +765,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
 
 extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
 
-/*
- * intel_opregion.c
- */
-extern int gma_intel_opregion_init(struct drm_device *dev);
-extern int gma_intel_opregion_exit(struct drm_device *dev);
-
 /*
  * framebuffer.c
  */

+ 121 - 165
drivers/gpu/drm/gma500/psb_intel_display.c

@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
 			    int x, int y, struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_i915_master_private *master_priv; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	unsigned long start, offset;
-	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
 	u32 dspcntr;
 	int ret = 0;
 
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
 
 	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
 
-	REG_WRITE(dspstride, crtc->fb->pitches[0]);
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
 
-	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr = REG_READ(map->cntr);
 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 
 	switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
 		psb_gtt_unpin(psbfb->gtt);
 		goto psb_intel_pipe_set_base_exit;
 	}
-	REG_WRITE(dspcntr_reg, dspcntr);
-
+	REG_WRITE(map->cntr, dspcntr);
 
-	if (0 /* FIXMEAC - check what PSB needs */) {
-		REG_WRITE(dspbase, offset);
-		REG_READ(dspbase);
-		REG_WRITE(dspsurf, start);
-		REG_READ(dspsurf);
-	} else {
-		REG_WRITE(dspbase, start + offset);
-		REG_READ(dspbase);
-	}
+	REG_WRITE(map->base, start + offset);
+	REG_READ(map->base);
 
 psb_intel_pipe_cleaner:
 	/* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
 static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_i915_master_private *master_priv; */
-	/* struct drm_i915_private *dev_priv = dev->dev_private; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	int pipe = psb_intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
 	/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 		/* Enable the DPLL */
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			REG_WRITE(dpll_reg, temp);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 			/* Wait for the clocks to stabilize. */
 			udelay(150);
 		}
 
 		/* Enable the pipe */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) == 0)
-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
 
 		/* Enable the plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp | DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+			REG_WRITE(map->base, REG_READ(map->base));
 		}
 
 		psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
 
 		/* Disable display plane */
-		temp = REG_READ(dspcntr_reg);
+		temp = REG_READ(map->cntr);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(dspcntr_reg,
+			REG_WRITE(map->cntr,
 				  temp & ~DISPLAY_PLANE_ENABLE);
 			/* Flush the plane changes */
-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
-			REG_READ(dspbase_reg);
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
 		}
 
 		/* Next, disable display pipes */
-		temp = REG_READ(pipeconf_reg);
+		temp = REG_READ(map->conf);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			REG_READ(pipeconf_reg);
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
 		}
 
 		/* Wait for vblank for the disable to take effect. */
 		psb_intel_wait_for_vblank(dev);
 
-		temp = REG_READ(dpll_reg);
+		temp = REG_READ(map->dpll);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-			REG_READ(dpll_reg);
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
 		}
 
 		/* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	int pipe = psb_intel_crtc->pipe;
-	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk;
 	struct psb_intel_clock_t clock;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
 	dpll |= PLL_REF_INPUT_DREFCLK;
 
 	/* setup pipeconf */
-	pipeconf = REG_READ(pipeconf_reg);
+	pipeconf = REG_READ(map->conf);
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_mode_debug_printmodeline(mode);
 
 	if (dpll & DPLL_VCO_ENABLE) {
-		REG_WRITE(fp_reg, fp);
-		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-		REG_READ(dpll_reg);
+		REG_WRITE(map->fp0, fp);
+		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
 		udelay(150);
 	}
 
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
 		REG_READ(LVDS);
 	}
 
-	REG_WRITE(fp_reg, fp);
-	REG_WRITE(dpll_reg, dpll);
-	REG_READ(dpll_reg);
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
 	/* Wait for the clocks to stabilize. */
 	udelay(150);
 
 	/* write it again -- the BIOS does, after all */
-	REG_WRITE(dpll_reg, dpll);
+	REG_WRITE(map->dpll, dpll);
 
-	REG_READ(dpll_reg);
+	REG_READ(map->dpll);
 	/* Wait for the clocks to stabilize. */
 	udelay(150);
 
-	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
 		  ((adjusted_mode->crtc_htotal - 1) << 16));
-	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
 		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
 		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
-	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
 		  ((adjusted_mode->crtc_vtotal - 1) << 16));
-	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
 		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
 		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
 	/* pipesrc and dspsize control the size that is scaled from,
 	 * which should always be the user's requested size.
 	 */
-	REG_WRITE(dspsize_reg,
+	REG_WRITE(map->size,
 		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-	REG_WRITE(dsppos_reg, 0);
-	REG_WRITE(pipesrc_reg,
+	REG_WRITE(map->pos, 0);
+	REG_WRITE(map->src,
 		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-	REG_WRITE(pipeconf_reg, pipeconf);
-	REG_READ(pipeconf_reg);
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
 
 	psb_intel_wait_for_vblank(dev);
 
-	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_WRITE(map->cntr, dspcntr);
 
 	/* Flush the plane changes */
 	crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
 void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_psb_private *dev_priv =
-				(struct drm_psb_private *)dev->dev_private;
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-	int palreg = PALETTE_A;
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	int palreg = map->palette;
 	int i;
 
 	/* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
 
 	switch (psb_intel_crtc->pipe) {
 	case 0:
-		break;
 	case 1:
-		palreg = PALETTE_B;
-		break;
-	case 2:
-		palreg = PALETTE_C;
 		break;
 	default:
 		dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
 		gma_power_end(dev);
 	} else {
 		for (i = 0; i < 256; i++) {
-			dev_priv->regs.psb.save_palette_a[i] =
+			dev_priv->regs.pipe[0].palette[i] =
 				  ((psb_intel_crtc->lut_r[i] +
 				  psb_intel_crtc->lut_adj[i]) << 16) |
 				  ((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
 static void psb_intel_crtc_save(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_psb_private *dev_priv =
-			(struct drm_psb_private *)dev->dev_private; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
 	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
-	int pipeA = (psb_intel_crtc->pipe == 0);
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
 	uint32_t paletteReg;
 	int i;
 
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
 		return;
 	}
 
-	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
-	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
-	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
-	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
-	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
-	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
-	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
-	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
-	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
-	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
-	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
-	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
-	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+	crtc_state->savePIPECONF = REG_READ(map->conf);
+	crtc_state->savePIPESRC = REG_READ(map->src);
+	crtc_state->saveFP0 = REG_READ(map->fp0);
+	crtc_state->saveFP1 = REG_READ(map->fp1);
+	crtc_state->saveDPLL = REG_READ(map->dpll);
+	crtc_state->saveHTOTAL = REG_READ(map->htotal);
+	crtc_state->saveHBLANK = REG_READ(map->hblank);
+	crtc_state->saveHSYNC = REG_READ(map->hsync);
+	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+	crtc_state->saveVBLANK = REG_READ(map->vblank);
+	crtc_state->saveVSYNC = REG_READ(map->vsync);
+	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
 
 	/*NOTE: DSPSIZE DSPPOS only for psb*/
-	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
-	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+	crtc_state->saveDSPSIZE = REG_READ(map->size);
+	crtc_state->saveDSPPOS = REG_READ(map->pos);
 
-	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+	crtc_state->saveDSPBASE = REG_READ(map->base);
 
-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	paletteReg = map->palette;
 	for (i = 0; i < 256; ++i)
 		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
 }
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
 static void psb_intel_crtc_restore(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	/* struct drm_psb_private * dev_priv =
-				(struct drm_psb_private *)dev->dev_private; */
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
 	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
-	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
-	int pipeA = (psb_intel_crtc->pipe == 0);
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
 	uint32_t paletteReg;
 	int i;
 
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
 	}
 
 	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
-		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+		REG_WRITE(map->dpll,
 			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
-		REG_READ(pipeA ? DPLL_A : DPLL_B);
+		REG_READ(map->dpll);
 		udelay(150);
 	}
 
-	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
-	REG_READ(pipeA ? FPA0 : FPB0);
+	REG_WRITE(map->fp0, crtc_state->saveFP0);
+	REG_READ(map->fp0);
 
-	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
-	REG_READ(pipeA ? FPA1 : FPB1);
+	REG_WRITE(map->fp1, crtc_state->saveFP1);
+	REG_READ(map->fp1);
 
-	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
-	REG_READ(pipeA ? DPLL_A : DPLL_B);
+	REG_WRITE(map->dpll, crtc_state->saveDPLL);
+	REG_READ(map->dpll);
 	udelay(150);
 
-	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
-	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
-	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
-	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
-	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
-	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
-	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
 
-	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
-	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
 
-	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
-	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+	REG_WRITE(map->src, crtc_state->savePIPESRC);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+	REG_WRITE(map->conf, crtc_state->savePIPECONF);
 
 	psb_intel_wait_for_vblank(dev);
 
-	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
 
 	psb_intel_wait_for_vblank(dev);
 
-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+	paletteReg = map->palette;
 	for (i = 0; i < 256; ++i)
 		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
 }
@@ -1115,34 +1081,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
 				struct drm_crtc *crtc)
 {
 	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_psb_private *dev_priv = dev->dev_private;
 	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 dpll;
 	u32 fp;
 	struct psb_intel_clock_t clock;
 	bool is_lvds;
-	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
 
 	if (gma_power_begin(dev, false)) {
-		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+		dpll = REG_READ(map->dpll);
 		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+			fp = REG_READ(map->fp0);
 		else
-			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+			fp = REG_READ(map->fp1);
 		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
 		gma_power_end(dev);
 	} else {
-		dpll = (pipe == 0) ?
-			dev_priv->regs.psb.saveDPLL_A :
-			dev_priv->regs.psb.saveDPLL_B;
+		dpll = p->dpll;
 
 		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-			fp = (pipe == 0) ?
-				dev_priv->regs.psb.saveFPA0 :
-				dev_priv->regs.psb.saveFPB0;
+			fp = p->fp0;
 		else
-			fp = (pipe == 0) ?
-				dev_priv->regs.psb.saveFPA1 :
-				dev_priv->regs.psb.saveFPB1;
+			fp = p->fp1;
 
 		is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
 								LVDS_PORT_EN);
@@ -1202,26 +1164,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
 	int vtot;
 	int vsync;
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
 
 	if (gma_power_begin(dev, false)) {
-		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
-		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
-		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
-		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+		htot = REG_READ(map->htotal);
+		hsync = REG_READ(map->hsync);
+		vtot = REG_READ(map->vtotal);
+		vsync = REG_READ(map->vsync);
 		gma_power_end(dev);
 	} else {
-		htot = (pipe == 0) ?
-			dev_priv->regs.psb.saveHTOTAL_A :
-			dev_priv->regs.psb.saveHTOTAL_B;
-		hsync = (pipe == 0) ?
-			dev_priv->regs.psb.saveHSYNC_A :
-			dev_priv->regs.psb.saveHSYNC_B;
-		vtot = (pipe == 0) ?
-			dev_priv->regs.psb.saveVTOTAL_A :
-			dev_priv->regs.psb.saveVTOTAL_B;
-		vsync = (pipe == 0) ?
-			dev_priv->regs.psb.saveVSYNC_A :
-			dev_priv->regs.psb.saveVSYNC_B;
+		htot = p->htotal;
+		hsync = p->hsync;
+		vtot = p->vtotal;
+		vsync = p->vsync;
 	}
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);

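The psb_intel_display.c hunks above convert the remaining per-pipe `pipeA ? REG_A : REG_B` ternaries into lookups through a per-pipe register map (`dev_priv->regmap[pipe]`) plus the cached pipe state in `dev_priv->regs.pipe[pipe]`. A minimal sketch of that pattern, built only from the register names removed above and listing only the fields these hunks touch (the real `struct psb_offset` and its per-chip tables live elsewhere in the gma500 driver; the `psb_regmap_sketch`/`psb_read_htotal` names are illustrative):

	/* Sketch: one offset table per pipe, indexed by the crtc's pipe number.
	 * Field list covers only the registers used in the hunks above. */
	struct psb_offset {
		u32 fp0, fp1, dpll;
		u32 htotal, hblank, hsync;
		u32 vtotal, vblank, vsync;
		u32 stride, size, pos, base;
		u32 cntr, conf, src, palette;
	};

	static const struct psb_offset psb_regmap_sketch[2] = {
		[0] = { .fp0 = FPA0, .fp1 = FPA1, .dpll = DPLL_A,
			.htotal = HTOTAL_A, .hblank = HBLANK_A, .hsync = HSYNC_A,
			.vtotal = VTOTAL_A, .vblank = VBLANK_A, .vsync = VSYNC_A,
			.stride = DSPASTRIDE, .size = DSPASIZE, .pos = DSPAPOS,
			.base = DSPABASE, .cntr = DSPACNTR, .conf = PIPEACONF,
			.src = PIPEASRC, .palette = PALETTE_A },
		[1] = { .fp0 = FPB0, .fp1 = FPB1, .dpll = DPLL_B,
			.htotal = HTOTAL_B, .hblank = HBLANK_B, .hsync = HSYNC_B,
			.vtotal = VTOTAL_B, .vblank = VBLANK_B, .vsync = VSYNC_B,
			.stride = DSPBSTRIDE, .size = DSPBSIZE, .pos = DSPBPOS,
			.base = DSPBBASE, .cntr = DSPBCNTR, .conf = PIPEBCONF,
			.src = PIPEBSRC, .palette = PALETTE_B },
	};

	/* Instead of REG_READ(pipe == 0 ? HTOTAL_A : HTOTAL_B): */
	static u32 psb_read_htotal(struct drm_device *dev, int pipe)
	{
		const struct psb_offset *map = &psb_regmap_sketch[pipe];

		return REG_READ(map->htotal);
	}

With one such table per pipe in `dev_priv->regmap[]`, the save/restore, clock readback and mode readback paths above no longer branch on the pipe at every register access.
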
+ 1 - 2
drivers/gpu/drm/gma500/psb_intel_sdvo.c

@@ -2038,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
 	struct drm_device *dev = connector->base.base.dev;
 
 	intel_attach_force_audio_property(&connector->base.base);
-	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
-		intel_attach_broadcast_rgb_property(&connector->base.base);
+	intel_attach_broadcast_rgb_property(&connector->base.base);
 	*/
 }
 

+ 2 - 0
drivers/gpu/drm/i915/Makefile

@@ -11,6 +11,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
 	  i915_gem_gtt.o \
+	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_crt.o \
 	  intel_lvds.o \
 	  intel_bios.o \
+	  intel_ddi.o \
 	  intel_dp.o \
 	  intel_hdmi.o \
 	  intel_sdvo.o \

+ 164 - 115
drivers/gpu/drm/i915/i915_debugfs.c

@@ -47,7 +47,6 @@ enum {
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
-	DEFERRED_FREE_LIST,
 };
 
 static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case PINNED_LIST:
-		seq_printf(m, "Pinned:\n");
-		head = &dev_priv->mm.pinned_list;
-		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
-	case DEFERRED_FREE_LIST:
-		seq_printf(m, "Deferred free:\n");
-		head = &dev_priv->mm.deferred_free_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -251,21 +242,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
-	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.pinned_list, mm_list);
-	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.inactive_list, mm_list);
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
-	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		if (obj->fault_mappable) {
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (list == PINNED_LIST && obj->pin_count == 0)
+			continue;
+
 		seq_printf(m, "   ");
 		describe_obj(m, obj);
 		seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 	return 0;
 }
 
-
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
 			   ring->name, ring->get_seqno(ring));
-		seq_printf(m, "Waiter sequence (%s):  %d\n",
-			   ring->name, ring->waiting_seqno);
-		seq_printf(m, "IRQ sequence (%s):     %d\n",
-			   ring->name, ring->irq_seqno);
 	}
 }
 
@@ -602,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	if (!ring->obj) {
-		seq_printf(m, "No ringbuffer setup\n");
-	} else {
-		const u8 __iomem *virt = ring->virtual_start;
-		uint32_t off;
-
-		for (off = 0; off < ring->size; off += 4) {
-			uint32_t *ptr = (uint32_t *)(virt + off);
-			seq_printf(m, "%08x :  %08x\n", off, *ptr);
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int ret;
-
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	if (ring->size == 0)
-		return 0;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	seq_printf(m, "Ring %s:\n", ring->name);
-	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
-	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
-	seq_printf(m, "  Size :    %08x\n", ring->size);
-	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
-	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
-		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
-		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
-	}
-	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
-	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static const char *ring_str(int ring)
 {
 	switch (ring) {
@@ -766,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
 			   error->semaphore_mboxes[ring][1]);
 	}
 	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 }
 
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_error_state *error;
-	unsigned long flags;
+	struct drm_i915_error_state *error = error_priv->error;
+	struct intel_ring_buffer *ring;
 	int i, j, page, offset, elt;
 
-	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	if (!dev_priv->first_error) {
+	if (!error) {
 		seq_printf(m, "no error state collected\n");
-		goto out;
+		return 0;
 	}
 
-	error = dev_priv->first_error;
-
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
+	seq_printf(m, "IER: 0x%08x\n", error->ier);
 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -801,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	i915_ring_error_state(m, dev, error, RCS);
-	if (HAS_BLT(dev))
-		i915_ring_error_state(m, dev, error, BCS);
-	if (HAS_BSD(dev))
-		i915_ring_error_state(m, dev, error, VCS);
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_error_state(m, dev, error, i);
 
 	if (error->active_bo)
 		print_error_buffers(m, "Active",
@@ -867,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	if (error->display)
 		intel_display_print_error_state(m, dev, error->display);
 
-out:
+	return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	mutex_lock(&dev->struct_mutex);
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_error_state_file_priv *error_priv;
+	unsigned long flags;
+
+	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+	if (!error_priv)
+		return -ENOMEM;
+
+	error_priv->dev = dev;
+
+	spin_lock_irqsave(&dev_priv->error_lock, flags);
+	error_priv->error = dev_priv->first_error;
+	if (error_priv->error)
+		kref_get(&error_priv->error->ref);
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
-	return 0;
+	return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+
+	if (error_priv->error)
+		kref_put(&error_priv->error->ref, i915_error_state_free);
+	kfree(error_priv);
+
+	return single_release(inode, file);
 }
 
+static const struct file_operations i915_error_state_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_error_state_open,
+	.read = seq_read,
+	.write = i915_error_state_write,
+	.llseek = default_llseek,
+	.release = i915_error_state_release,
+};
+
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1356,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
 	int ret;
 
+	if (data == NULL)
+		return -ENOMEM;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
-	if (opregion->header)
-		seq_write(m, opregion->header, OPREGION_SIZE);
+	if (opregion->header) {
+		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+		seq_write(m, data, OPREGION_SIZE);
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
+out:
+	kfree(data);
 	return 0;
 }
 
@@ -1658,6 +1643,65 @@ static const struct file_operations i915_wedged_fops = {
 	.llseek = default_llseek,
 };
 
+static ssize_t
+i915_ring_stop_read(struct file *filp,
+		    char __user *ubuf,
+		    size_t max,
+		    loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int len;
+
+	len = snprintf(buf, sizeof(buf),
+		       "0x%08x\n", dev_priv->stop_rings);
+
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+		     const char __user *ubuf,
+		     size_t cnt,
+		     loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 0;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+	mutex_lock(&dev->struct_mutex);
+	dev_priv->stop_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_ring_stop_read,
+	.write = i915_ring_stop_write,
+	.llseek = default_llseek,
+};
+
 static ssize_t
 i915_max_freq_read(struct file *filp,
 		   char __user *ubuf,
@@ -1900,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
+	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
-	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1913,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
-	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
-	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
-	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
-	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
-	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1965,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_ring_stop",
+				  &i915_ring_stop_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_error_state",
+				  &i915_error_state_fops);
+	if (ret)
+		return ret;
 
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
@@ -1983,6 +2030,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */

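The error-state rework above stops printing `dev_priv->first_error` directly under the spinlock: `i915_error_state_open()` now takes a `kref` reference on the current snapshot, so a reader of the debugfs file keeps a consistent copy even if the driver captures or destroys a newer error state while the file is held open, and writing to the new `i915_error_state` file drops the cached state via `i915_destroy_error_state()`. Below is a minimal sketch of that producer/consumer refcounting, assuming only the standard `<linux/kref.h>` API; the `snapshot_*` names are illustrative, not driver functions.

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct snapshot {
		struct kref ref;
		/* ... captured hardware state ... */
	};

	static void snapshot_free(struct kref *ref)
	{
		kfree(container_of(ref, struct snapshot, ref));
	}

	/* Consumer (debugfs open): pin the current snapshot under the
	 * producer's lock so it cannot vanish while the file is open. */
	static struct snapshot *snapshot_get(struct snapshot **slot, spinlock_t *lock)
	{
		struct snapshot *s;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		s = *slot;
		if (s)
			kref_get(&s->ref);
		spin_unlock_irqrestore(lock, flags);
		return s;
	}

	/* Consumer (debugfs release) or producer replacing the slot:
	 * the last reference frees the snapshot. */
	static void snapshot_put(struct snapshot *s)
	{
		if (s)
			kref_put(&s->ref, snapshot_free);
	}

The companion `i915_ring_stop` file added above exposes `dev_priv->stop_rings` for the "GPU hangman": writing a ring mask there (DRM debugfs entries normally live under /sys/kernel/debug/dri/<minor>/) simulates a hang, and the reset path in i915_drv.c clears the mask again once the reset has run.
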
File diff suppressed because it is too large
+ 249 - 810
drivers/gpu/drm/i915/i915_dma.c


+ 103 - 62
drivers/gpu/drm/i915/i915_drv.c

@@ -345,6 +345,13 @@ static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
 	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
 	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
 	{0, 0, 0}
 };
 
@@ -377,18 +384,23 @@ void intel_detect_pch(struct drm_device *dev)
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
+				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
+				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
+				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 			}
+			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 		}
 		pci_dev_put(pch);
 	}
@@ -402,9 +414,11 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915_semaphores >= 0)
 		return i915_semaphores;
 
+#ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
-	if (INTEL_INFO(dev)->gen == 6)
-		return !intel_iommu_enabled;
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
 
 	return 1;
 }
@@ -433,7 +447,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
 		udelay(10);
 
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
 	POSTING_READ(FORCEWAKE_MT);
 
 	count = 0;
@@ -475,7 +489,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
 }
@@ -617,15 +631,16 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_init_pch_refclk(dev);
+
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
 		error = i915_gem_init_hw(dev);
 		mutex_unlock(&dev->struct_mutex);
 
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_init_pch_refclk(dev);
-
+		intel_modeset_init_hw(dev);
 		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
@@ -633,9 +648,6 @@ static int i915_drm_thaw(struct drm_device *dev)
 		mutex_lock(&dev->mode_config.mutex);
 		drm_helper_resume_force_mode(dev);
 		mutex_unlock(&dev->mode_config.mutex);
-
-		if (IS_IRONLAKE_M(dev))
-			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -668,7 +680,7 @@ int i915_resume(struct drm_device *dev)
 	return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+static int i8xx_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -702,11 +714,12 @@ static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
 	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	return gdrst & 0x1;
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
 
-static int i965_do_reset(struct drm_device *dev, u8 flags)
+static int i965_do_reset(struct drm_device *dev)
 {
+	int ret;
 	u8 gdrst;
 
 	/*
@@ -715,20 +728,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
 	 * triggers the reset; when done, the hardware will clear it.
 	 */
 	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      gdrst | GRDOM_RENDER |
+			      GRDOM_RESET_ENABLE);
+	ret =  wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      gdrst | GRDOM_MEDIA |
+			      GRDOM_RESET_ENABLE);
 
 	return wait_for(i965_reset_complete(dev), 500);
 }
 
-static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+static int ironlake_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+	u32 gdrst;
+	int ret;
+
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
-static int gen6_do_reset(struct drm_device *dev, u8 flags)
+static int gen6_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int	ret;
@@ -763,10 +799,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
 	return ret;
 }
 
+static int intel_gpu_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = -ENODEV;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		ret = gen6_do_reset(dev);
+		break;
+	case 5:
+		ret = ironlake_do_reset(dev);
+		break;
+	case 4:
+		ret = i965_do_reset(dev);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev);
+		break;
+	}
+
+	/* Also reset the gpu hangman. */
+	if (dev_priv->stop_rings) {
+		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
- * @flags: reset domains
  *
  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
  * reset or otherwise an error code.
@@ -779,14 +849,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	/*
-	 * We really should only reset the display subsystem if we actually
-	 * need to
-	 */
-	bool need_display = true;
 	int ret;
 
 	if (!i915_try_reset)
@@ -795,26 +860,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	if (!mutex_trylock(&dev->struct_mutex))
 		return -EBUSY;
 
+	dev_priv->stop_rings = 0;
+
 	i915_gem_reset(dev);
 
 	ret = -ENODEV;
-	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+	if (get_seconds() - dev_priv->last_gpu_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-	} else switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		ret = gen6_do_reset(dev, flags);
-		break;
-	case 5:
-		ret = ironlake_do_reset(dev, flags);
-		break;
-	case 4:
-		ret = i965_do_reset(dev, flags);
-		break;
-	case 2:
-		ret = i8xx_do_reset(dev, flags);
-		break;
-	}
+	else
+		ret = intel_gpu_reset(dev);
+
 	dev_priv->last_gpu_reset = get_seconds();
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
@@ -838,15 +893,15 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring;
+		int i;
+
 		dev_priv->mm.suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
-		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
-		if (HAS_BSD(dev))
-		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
-		if (HAS_BLT(dev))
-		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+		for_each_ring(ring, dev_priv, i)
+			ring->init(ring);
 
 		i915_gem_init_ppgtt(dev);
 
@@ -856,23 +911,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 			intel_modeset_init_hw(dev);
 
 		drm_irq_uninstall(dev);
-		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
-
-		mutex_lock(&dev->struct_mutex);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	/*
-	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
-	 * need to retrain the display link and cannot just restore the register
-	 * values.
-	 */
-	if (need_display) {
-		mutex_lock(&dev->mode_config.mutex);
-		drm_helper_resume_force_mode(dev);
-		mutex_unlock(&dev->mode_config.mutex);
+	} else {
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	return 0;

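The forcewake hunks above replace open-coded writes such as `(1<<16) | 1` with `_MASKED_BIT_ENABLE()`/`_MASKED_BIT_DISABLE()`. These registers treat the upper 16 bits as a per-bit write-enable mask, so a write only changes the bits whose mask bit is set and leaves the rest untouched. Definitions consistent with the literal values being replaced would look like the sketch below (the real macros live in i915_reg.h, which this merge also touches):

	/* Sketch: high 16 bits select which low bits the write may change. */
	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a   */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */

	/* Matches the FORCEWAKE_MT hunk above:
	 *   _MASKED_BIT_ENABLE(1)  == (1 << 16) | 1
	 *   _MASKED_BIT_DISABLE(1) == (1 << 16) | 0
	 */

This is also why the i915_gem.c hunks further down can switch the `ARB_MODE` and `GFX_MODE` writes to the same helpers without read-modify-write sequences.
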
+ 64 - 69
drivers/gpu/drm/i915/i915_drv.h

@@ -39,6 +39,7 @@
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
+#include <linux/kref.h>
 
 /* General customization:
  */
@@ -78,6 +79,16 @@ enum port {
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
+struct intel_pch_pll {
+	int refcount; /* count of number of CRTCs sharing this PLL */
+	int active; /* count of number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	int pll_reg;
+	int fp0_reg;
+	int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -122,11 +133,11 @@ struct opregion_asle;
 struct drm_i915_private;
 
 struct intel_opregion {
-	struct opregion_header *header;
-	struct opregion_acpi *acpi;
-	struct opregion_swsci *swsci;
-	struct opregion_asle *asle;
-	void *vbt;
+	struct opregion_header __iomem *header;
+	struct opregion_acpi __iomem *acpi;
+	struct opregion_swsci __iomem *swsci;
+	struct opregion_asle __iomem *asle;
+	void __iomem *vbt;
 	u32 __iomem *lid_state;
 };
 #define OPREGION_SIZE            (8*1024)
@@ -161,8 +172,11 @@ struct sdvo_device_mapping {
 struct intel_display_error_state;
 
 struct drm_i915_error_state {
+	struct kref ref;
 	u32 eir;
 	u32 pgtbl_er;
+	u32 ier;
+	bool waiting[I915_NUM_RINGS];
 	u32 pipestat[I915_MAX_PIPES];
 	u32 tail[I915_NUM_RINGS];
 	u32 head[I915_NUM_RINGS];
@@ -228,11 +242,15 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
+	void (*sanitize_pm)(struct drm_device *dev);
+	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -328,7 +346,6 @@ typedef struct drm_i915_private {
 
 	const struct intel_device_info *info;
 
-	int has_gem;
 	int relative_constants_mode;
 
 	void __iomem *regs;
@@ -357,7 +374,6 @@ typedef struct drm_i915_private {
 
 	drm_dma_handle_t *status_page_dmah;
 	uint32_t counter;
-	drm_local_map_t hws_map;
 	struct drm_i915_gem_object *pwrctx;
 	struct drm_i915_gem_object *renderctx;
 
@@ -386,22 +402,20 @@ typedef struct drm_i915_private {
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
 
-	int tex_lru_log_granularity;
-	int allow_batchbuffer;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-	int vblank_pipe;
 	int num_pipe;
+	int num_pch_pll;
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd;
-	uint32_t last_acthd_bsd;
-	uint32_t last_acthd_blt;
+	uint32_t last_acthd[I915_NUM_RINGS];
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 
+	unsigned int stop_rings;
+
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	enum plane cfb_plane;
@@ -453,6 +467,7 @@ typedef struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
+	/* Protected by dev->error_lock. */
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
 	struct completion error_completion;
@@ -677,23 +692,9 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head inactive_list;
 
-		/**
-		 * LRU list of objects which are not in the ringbuffer but
-		 * are still pinned in the GTT.
-		 */
-		struct list_head pinned_list;
-
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 
-		/**
-		 * List of objects currently pending being freed.
-		 *
-		 * These objects are no longer in use, but due to a signal
-		 * we were prevented from freeing them at the appointed time.
-		 */
-		struct list_head deferred_free_list;
-
 		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
@@ -742,6 +743,16 @@ typedef struct drm_i915_private {
 		size_t object_memory;
 		u32 object_count;
 	} mm;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct {
+		unsigned allow_batchbuffer : 1;
+		u32 __iomem *gfx_hws_cpu_addr;
+	} dri1;
+
+	/* Kernel Modesetting */
+
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
@@ -751,7 +762,8 @@ typedef struct drm_i915_private {
 	struct drm_crtc *plane_to_crtc_mapping[3];
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
-	bool flip_pending_is_done;
+
+	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -806,6 +818,11 @@ typedef struct drm_i915_private {
 	struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
@@ -869,7 +886,14 @@ struct drm_i915_gem_object {
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode:2;
-	unsigned int tiling_changed:1;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -1116,6 +1140,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
 				/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
@@ -1133,7 +1158,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *box,
 			 int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1143,19 +1168,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
 
 extern void intel_irq_init(struct drm_device *dev);
 
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1287,18 +1303,18 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
 				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-				   uint32_t seqno,
-				   bool do_retire);
+				   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1349,10 +1365,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
 					  unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
-					   bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
-					 bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1467,28 +1484,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
 					    struct intel_display_error_state *error);
 #endif
 
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
-		LOCK_TEST_WITH_RETURN(dev, file);			\
-} while (0)
-
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
  * returned.

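The `for_each_ring()` iterator defined in the hunk above replaces the scattered `HAS_BSD()`/`HAS_BLT()` checks elsewhere in this merge: it walks all `I915_NUM_RINGS` slots and skips any ring that `intel_ring_initialized()` (referenced by the macro, provided by the ring-buffer code) reports as not set up, so callers no longer need to know which engines a given chip has. A typical use, mirroring the reset path earlier in this diff:

	/* Re-initialise every ring that exists on this chip; rings that were
	 * never set up are skipped by the iterator itself. */
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->init(ring);

The same pattern appears in the error-state dumper above and in the retire/idle paths in i915_gem.c below, each of which previously open-coded the per-ring conditionals.
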
+ 224 - 247
drivers/gpu/drm/i915/i915_gem.c

@@ -46,7 +46,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -66,7 +65,7 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 	/* As we do not have an associated fence register, we will force
 	 * a tiling change if we ever need to acquire one.
 	 */
-	obj->tiling_changed = false;
+	obj->fence_dirty = false;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 }
 
@@ -132,7 +131,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj->gtt_space && !obj->active && obj->pin_count == 0;
+	return !obj->active;
 }
 
 int
@@ -141,6 +140,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_init *args = data;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	if (args->gtt_start >= args->gtt_end ||
 	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
 		return -EINVAL;
@@ -166,13 +168,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	size_t pinned;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		pinned += obj->gtt_space->size;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+		if (obj->pin_count)
+			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->mm.gtt_total;
@@ -243,6 +243,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
 	struct drm_i915_gem_create *args = data;
+
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
 }
@@ -282,8 +283,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
 }
 
 static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
-			  const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+			  const char __user *cpu_vaddr,
 			  int length)
 {
 	int ret, cpu_offset = 0;
@@ -558,11 +559,14 @@ fast_user_write(struct io_mapping *mapping,
 		char __user *user_data,
 		int length)
 {
-	char *vaddr_atomic;
+	void __iomem *vaddr_atomic;
+	void *vaddr;
 	unsigned long unwritten;
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
-	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+	/* We can use the cpu mem copy function because this is X86. */
+	vaddr = (void __force*)vaddr_atomic + page_offset;
+	unwritten = __copy_from_user_inatomic_nocache(vaddr,
 						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	return unwritten;
@@ -925,9 +929,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	uint32_t write_domain = args->write_domain;
 	int ret;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	/* Only handle setting domains to types used by the CPU. */
 	if (write_domain & I915_GEM_GPU_DOMAINS)
 		return -EINVAL;
@@ -981,9 +982,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -1019,9 +1017,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *obj;
 	unsigned long addr;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
@@ -1247,9 +1242,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -1307,9 +1299,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_mmap_gtt *args = data;
 
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
@@ -1450,10 +1439,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (obj->pin_count != 0)
-		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
-	else
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	BUG_ON(!list_empty(&obj->gpu_write_list));
 	BUG_ON(!obj->active);
@@ -1669,10 +1655,11 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_lists(dev_priv, ring);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
@@ -1777,24 +1764,11 @@ void
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-	    struct drm_i915_gem_object *obj, *next;
-
-	    /* We must be careful that during unbind() we do not
-	     * accidentally infinitely recurse into retire requests.
-	     * Currently:
-	     *   retire -> free -> unbind -> wait -> retire_ring
-	     */
-	    list_for_each_entry_safe(obj, next,
-				     &dev_priv->mm.deferred_free_list,
-				     mm_list)
-		    i915_gem_free_object_tail(obj);
-	}
-
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_retire_requests_ring(ring);
 }
 
 static void
@@ -1802,6 +1776,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	struct intel_ring_buffer *ring;
 	bool idle;
 	int i;
 
@@ -1821,9 +1796,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 * objects indefinitely.
 	 */
 	idle = true;
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		if (!list_empty(&ring->gpu_write_list)) {
 			struct drm_i915_gem_request *request;
 			int ret;
@@ -1845,20 +1818,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct intel_ring_buffer *ring,
-		  uint32_t seqno,
-		  bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	u32 ier;
-	int ret = 0;
-
-	BUG_ON(seqno == 0);
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		struct completion *x = &dev_priv->error_completion;
@@ -1873,6 +1836,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		return recovery_complete ? -EIO : -EAGAIN;
 	}
 
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret = 0;
+
+	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
 	if (seqno == ring->outstanding_lazy_request) {
 		struct drm_i915_gem_request *request;
 
@@ -1886,56 +1863,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
 			return ret;
 		}
 
-		seqno = request->seqno;
+		BUG_ON(seqno != request->seqno);
 	}
 
-	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		if (HAS_PCH_SPLIT(ring->dev))
-			ier = I915_READ(DEIER) | I915_READ(GTIER);
-		else if (IS_VALLEYVIEW(ring->dev))
-			ier = I915_READ(GTIER) | I915_READ(VLV_IER);
-		else
-			ier = I915_READ(IER);
-		if (!ier) {
-			DRM_ERROR("something (likely vbetool) disabled "
-				  "interrupts, re-enabling\n");
-			ring->dev->driver->irq_preinstall(ring->dev);
-			ring->dev->driver->irq_postinstall(ring->dev);
-		}
+	return ret;
+}
 
-		trace_i915_gem_request_wait_begin(ring, seqno);
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			bool interruptible)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	int ret = 0;
 
-		ring->waiting_seqno = seqno;
-		if (ring->irq_get(ring)) {
-			if (dev_priv->mm.interruptible)
-				ret = wait_event_interruptible(ring->irq_queue,
-							       i915_seqno_passed(ring->get_seqno(ring), seqno)
-							       || atomic_read(&dev_priv->mm.wedged));
-			else
-				wait_event(ring->irq_queue,
-					   i915_seqno_passed(ring->get_seqno(ring), seqno)
-					   || atomic_read(&dev_priv->mm.wedged));
+	if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+		return 0;
 
-			ring->irq_put(ring);
-		} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
-							     seqno) ||
-					   atomic_read(&dev_priv->mm.wedged), 3000))
-			ret = -EBUSY;
-		ring->waiting_seqno = 0;
+	trace_i915_gem_request_wait_begin(ring, seqno);
+	if (WARN_ON(!ring->irq_get(ring)))
+		return -ENODEV;
 
-		trace_i915_gem_request_wait_end(ring, seqno);
-	}
+#define EXIT_COND \
+	(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+	atomic_read(&dev_priv->mm.wedged))
+
+	if (interruptible)
+		ret = wait_event_interruptible(ring->irq_queue,
+					       EXIT_COND);
+	else
+		wait_event(ring->irq_queue, EXIT_COND);
+
+	ring->irq_put(ring);
+	trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+	return ret;
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+		  uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	int ret = 0;
+
+	BUG_ON(seqno == 0);
+
+	ret = i915_gem_check_wedge(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
 	if (atomic_read(&dev_priv->mm.wedged))
 		ret = -EAGAIN;
 
-	/* Directly dispatch request retiring.  While we have the work queue
-	 * to handle this, the waiter on a request often wants an associated
-	 * buffer to have made it to the inactive list, and we would need
-	 * a separate wait queue to handle that.
-	 */
-	if (ret == 0 && do_retire)
-		i915_gem_retire_requests_ring(ring);
-
 	return ret;
 }
 
@@ -1957,10 +1945,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
-					true);
+		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
 		if (ret)
 			return ret;
+		i915_gem_retire_requests_ring(obj->ring);
 	}
 
 	return 0;
@@ -1998,22 +1986,9 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
-	if (seqno == from->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(from, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		seqno = request->seqno;
-	}
-
+	ret = i915_gem_check_olr(obj->ring, seqno);
+	if (ret)
+		return ret;
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
@@ -2064,7 +2039,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	}
 
 	ret = i915_gem_object_finish_gpu(obj);
-	if (ret == -ERESTARTSYS)
+	if (ret)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
 	 * should be safe and we need to cleanup or else we might
@@ -2091,7 +2066,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
 	/* release the fence reg _after_ flushing */
 	ret = i915_gem_object_put_fence(obj);
-	if (ret == -ERESTARTSYS)
+	if (ret)
 		return ret;
 
 	trace_i915_gem_object_unbind(obj);
@@ -2143,7 +2118,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	int ret;
 
@@ -2157,20 +2132,24 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
 			return ret;
 	}
 
-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
-				 do_retire);
+	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+	for_each_ring(ring, dev_priv, i) {
+		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
+
+		/* Is the device fubar? */
+		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+			return -EBUSY;
 	}
 
 	return 0;
@@ -2357,9 +2336,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 	}
 
 	if (obj->last_fenced_seqno) {
-		ret = i915_wait_request(obj->ring,
-					obj->last_fenced_seqno,
-					false);
+		ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
 		if (ret)
 			return ret;
 
@@ -2454,7 +2431,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	/* Have we updated the tiling parameters upon the object and so
 	 * will need to serialise the write to the associated fence register?
 	 */
-	if (obj->tiling_changed) {
+	if (obj->fence_dirty) {
 		ret = i915_gem_object_flush_fence(obj);
 		if (ret)
 			return ret;
@@ -2463,7 +2440,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->tiling_changed) {
+		if (!obj->fence_dirty) {
 			list_move_tail(&reg->lru_list,
 				       &dev_priv->mm.fence_list);
 			return 0;
@@ -2486,7 +2463,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 		return 0;
 
 	i915_gem_object_update_fence(obj, reg, enable);
-	obj->tiling_changed = false;
+	obj->fence_dirty = false;
 
 	return 0;
 }
@@ -2732,6 +2709,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
@@ -2772,6 +2750,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_read_domains,
 					    old_write_domain);
 
+	/* And bump the LRU for this access */
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
 	return 0;
 }
 
@@ -3020,28 +3002,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = 0;
-	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
-		/* And wait for the seqno passing without holding any locks and
-		 * causing extra latency for others. This is safe as the irq
-		 * generation is designed to be run atomically and so is
-		 * lockless.
-		 */
-		if (ring->irq_get(ring)) {
-			ret = wait_event_interruptible(ring->irq_queue,
-						       i915_seqno_passed(ring->get_seqno(ring), seqno)
-						       || atomic_read(&dev_priv->mm.wedged));
-			ring->irq_put(ring);
-
-			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-				ret = -EIO;
-		} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
-							     seqno) ||
-				    atomic_read(&dev_priv->mm.wedged), 3000)) {
-			ret = -EBUSY;
-		}
-	}
-
+	ret = __wait_seqno(ring, seqno, true);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3053,12 +3014,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    uint32_t alignment,
 		    bool map_and_fenceable)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-	WARN_ON(i915_verify_lists(dev));
 
 	if (obj->gtt_space != NULL) {
 		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3086,34 +3044,20 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	if (obj->pin_count++ == 0) {
-		if (!obj->active)
-			list_move_tail(&obj->mm_list,
-				       &dev_priv->mm.pinned_list);
-	}
+	obj->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
 
-	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
 void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	WARN_ON(i915_verify_lists(dev));
 	BUG_ON(obj->pin_count == 0);
 	BUG_ON(obj->gtt_space == NULL);
 
-	if (--obj->pin_count == 0) {
-		if (!obj->active)
-			list_move_tail(&obj->mm_list,
-				       &dev_priv->mm.inactive_list);
+	if (--obj->pin_count == 0)
 		obj->pin_mappable = false;
-	}
-	WARN_ON(i915_verify_lists(dev));
 }
 
 int
@@ -3237,20 +3181,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
 			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
-		} else if (obj->ring->outstanding_lazy_request ==
-			   obj->last_rendering_seqno) {
-			struct drm_i915_gem_request *request;
-
-			/* This ring is not being cleared by active usage,
-			 * so emit a request to do so.
-			 */
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request) {
-				ret = i915_add_request(obj->ring, NULL, request);
-				if (ret)
-					kfree(request);
-			} else
-				ret = -ENOMEM;
+		} else {
+			ret = i915_gem_check_olr(obj->ring,
+						 obj->last_rendering_seqno);
 		}
 
 		/* Update the active list for the hardware's current position.
@@ -3386,21 +3319,29 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = i915_gem_object_unbind(obj);
-	if (ret == -ERESTARTSYS) {
-		list_move(&obj->mm_list,
-			  &dev_priv->mm.deferred_free_list);
-		return;
-	}
 
 	trace_i915_gem_object_destroy(obj);
 
+	if (obj->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	obj->pin_count = 0;
+	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		WARN_ON(i915_gem_object_unbind(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
 	if (obj->base.map_list.map)
 		drm_gem_free_mmap_offset(&obj->base);
 
@@ -3411,20 +3352,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 	kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-
-	while (obj->pin_count > 0)
-		i915_gem_object_unpin(obj);
-
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
-	i915_gem_free_object_tail(obj);
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -3438,20 +3365,16 @@ i915_gem_idle(struct drm_device *dev)
 		return 0;
 	}
 
-	ret = i915_gpu_idle(dev, true);
+	ret = i915_gpu_idle(dev);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
+	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_inactive(dev, false);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_gem_evict_everything(dev, false);
 
 	i915_gem_reset_fences(dev);
 
@@ -3489,9 +3412,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 
 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
 	if (IS_GEN6(dev))
-		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
 	else
-		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
 }
 
 void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3540,18 +3463,16 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 		ecochk = I915_READ(GAM_ECOCHK);
 		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
 				       ECOCHK_PPGTT_CACHE64B);
-		I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 	} else if (INTEL_INFO(dev)->gen >= 7) {
 		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
 		/* GFX_MODE is per-ring on gen7+ */
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 7)
 			I915_WRITE(RING_MODE_GEN7(ring),
-				   GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
 		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3595,14 +3516,80 @@ cleanup_render_ring:
 	return ret;
 }
 
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	} else {
+		/* Let GEM Manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page.  There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture.  One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_init_global_gtt(dev, 0, mappable_size,
+					 gtt_size);
+	}
+
+	ret = i915_gem_init_hw(dev);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		return ret;
+	}
+
+	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->dri1.allow_batchbuffer = 1;
+	return 0;
+}
+
 void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		intel_cleanup_ring_buffer(ring);
 }
 
 int
@@ -3610,7 +3597,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, i;
+	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -3632,10 +3619,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
-		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
-	}
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -3694,9 +3677,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
@@ -3708,12 +3689,8 @@ i915_gem_load(struct drm_device *dev)
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
 	if (IS_GEN3(dev)) {
-		u32 tmp = I915_READ(MI_ARB_STATE);
-		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
-			/* arb state is a masked write, so set bit + bit in mask */
-			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
-			I915_WRITE(MI_ARB_STATE, tmp);
-		}
+		I915_WRITE(MI_ARB_STATE,
+			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 	}
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -4016,7 +3993,7 @@ rescan:
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		if (i915_gpu_idle(dev, true) == 0)
+		if (i915_gpu_idle(dev) == 0)
 			goto rescan;
 	}
 	mutex_unlock(&dev->struct_mutex);
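The hunks above also replace the open-coded "for (i = 0; i < I915_NUM_RINGS; i++)" walks over dev_priv->ring[] with a for_each_ring() iterator whose definition is not included in this diff (it lives elsewhere in the driver). The following is only a plausible sketch of that helper, built around the same intel_ring_initialized() test that the execbuffer hunk below starts using:

/* Plausible reconstruction of for_each_ring(); the committed macro is
 * not shown in this diff.  It walks the per-device ring array and
 * skips rings the hardware does not have. */
#define for_each_ring(ring__, dev_priv__, i__)				\
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++)		\
		if (((ring__) = &(dev_priv__)->ring[(i__)]),		\
		    intel_ring_initialized((ring__)))

Skipping uninitialized rings is what keeps the converted loops in i915_gem_init_ppgtt() and i915_gem_cleanup_ringbuffer() from touching rings the device does not have.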

+ 0 - 16
drivers/gpu/drm/i915/i915_gem_debug.c

@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
 		}
 	}
 
-	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
-		if (obj->base.dev != dev ||
-		    !atomic_read(&obj->base.refcount.refcount)) {
-			DRM_ERROR("freed pinned %p\n", obj);
-			err++;
-			break;
-		} else if (!obj->pin_count || obj->active ||
-			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
-			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
-				  obj,
-				  obj->pin_count, obj->active,
-				  obj->base.write_domain);
-			err++;
-		}
-	}
-
 	return warned = err;
 }
 #endif /* WATCH_INACTIVE */

+ 17 - 21
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -35,6 +35,9 @@
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+	if (obj->pin_count)
+		return false;
+
 	list_add(&obj->exec_list, unwind);
 	return drm_mm_scan_add_block(obj->gtt_space);
 }
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain || obj->pin_count)
+		if (obj->base.write_domain)
 			continue;
 
 		if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 
 	/* Finally add anything with a pending flush (in order of retirement) */
 	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (obj->pin_count)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain || obj->pin_count)
+		if (!obj->base.write_domain)
 			continue;
 
 		if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
 i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	struct drm_i915_gem_object *obj, *next;
 	bool lists_empty;
+	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	trace_i915_gem_evict_everything(dev, purgeable_only);
 
-	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev, true);
+	/* The gpu_idle will flush everything in the write domain to the
+	 * active list. Then we must move everything off the active list
+	 * with retire requests.
+	 */
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		return ret;
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	i915_gem_retire_requests(dev);
 
-	return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
+	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list, mm_list) {
 		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			int ret = i915_gem_object_unbind(obj);
-			if (ret)
-				return ret;
+			if (obj->pin_count == 0)
+				WARN_ON(i915_gem_object_unbind(obj));
 		}
 	}
 

+ 17 - 10
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -967,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 			obj->pending_gpu_write = true;
 			list_move_tail(&obj->gpu_write_list,
 				       &ring->gpu_write_list);
-			intel_mark_busy(ring->dev, obj);
+			if (obj->pin_count) /* check for potential scanout */
+				intel_mark_busy(ring->dev, obj);
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
+
+	intel_mark_busy(ring->dev, NULL);
 }
 
 static void
@@ -1061,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		ring = &dev_priv->ring[RCS];
 		break;
 	case I915_EXEC_BSD:
-		if (!HAS_BSD(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BSD)\n");
-			return -EINVAL;
-		}
 		ring = &dev_priv->ring[VCS];
 		break;
 	case I915_EXEC_BLT:
-		if (!HAS_BLT(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BLT)\n");
-			return -EINVAL;
-		}
 		ring = &dev_priv->ring[BCS];
 		break;
 	default:
@@ -1079,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;
 	}
+	if (!intel_ring_initialized(ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
 
 	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	mask = I915_EXEC_CONSTANTS_MASK;
@@ -1116,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			return -EINVAL;
 		}
 
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			return -EINVAL;
+		}
+
 		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
 			DRM_DEBUG("execbuf with %u cliprects\n",
 				  args->num_cliprects);
 			return -EINVAL;
 		}
+
 		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL) {
@@ -1225,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 * so every billion or so execbuffers, we need to stall
 			 * the GPU in order to reset the counters.
 			 */
-			ret = i915_gpu_idle(dev, true);
+			ret = i915_gpu_idle(dev);
 			if (ret)
 				goto err;
+			i915_gem_retire_requests(dev);
 
 			BUG_ON(ring->sync_seqno[i]);
 		}

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -317,7 +317,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev, false)) {
+		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);

+ 202 - 0
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is try to reuse that object for our own fbcon which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+#define PTE_ADDRESS_MASK		0xfffff000
+#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
+#define PTE_MAPPING_TYPE_MASK		(3 << 1)
+#define PTE_VALID			(1 << 0)
+
+/**
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ *                       a physical one
+ * @dev: drm device
+ * @offset: address to translate
+ *
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
+ */
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
+
+#if 0
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so compute the base by subtracting the stolen memory
+	 * from the Top of Low Usable DRAM which is where the BIOS places
+	 * the graphics stolen memory.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* top 32bits are reserved = 0 */
+		pci_read_config_dword(pdev, 0xA4, &base);
+	} else {
+		/* XXX presume 8xx is the same as i915 */
+		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+	}
+#else
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		u16 val;
+		pci_read_config_word(pdev, 0xb0, &val);
+		base = val >> 4 << 20;
+	} else {
+		u8 val;
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
+	}
+	base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+	return base + offset;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+	DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
+	DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+	unsigned long cfb_base;
+	unsigned long ll_base = 0;
+
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
+
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (compressed_fb)
+		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb)
+		goto err;
+
+	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+	if (!cfb_base)
+		goto err_fb;
+
+	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+						    4096, 4096, 0);
+		if (compressed_llb)
+			compressed_llb = drm_mm_get_block(compressed_llb,
+							  4096, 4096);
+		if (!compressed_llb)
+			goto err_fb;
+
+		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+		if (!ll_base)
+			goto err_llb;
+	}
+
+	dev_priv->cfb_size = size;
+
+	dev_priv->compressed_fb = compressed_fb;
+	if (HAS_PCH_SPLIT(dev))
+		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+	else if (IS_GM45(dev)) {
+		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+	} else {
+		I915_WRITE(FBC_CFB_BASE, cfb_base);
+		I915_WRITE(FBC_LL_BASE, ll_base);
+		dev_priv->compressed_llb = compressed_llb;
+	}
+
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+		      cfb_base, ll_base, size >> 20);
+	return;
+
+err_llb:
+	drm_mm_put_block(compressed_llb);
+err_fb:
+	drm_mm_put_block(compressed_fb);
+err:
+	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+	i915_warn_stolen(dev);
+}
+
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	drm_mm_put_block(dev_priv->compressed_fb);
+	if (dev_priv->compressed_llb)
+		drm_mm_put_block(dev_priv->compressed_llb);
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+	if (I915_HAS_FBC(dev) && i915_powersave)
+		i915_cleanup_compression(dev);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+	/* Basic memrange allocator for stolen space */
+	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+	/* Try to set up FBC with a reasonable compressed buffer size */
+	if (I915_HAS_FBC(dev) && i915_powersave) {
+		int cfb_size;
+
+		/* Leave 1M for line length buffer & misc. */
+
+		/* Try to get a 32M buffer... */
+		if (prealloc_size > (36*1024*1024))
+			cfb_size = 32*1024*1024;
+		else /* fall back to 7/8 of the stolen space */
+			cfb_size = prealloc_size * 7 / 8;
+		i915_setup_compression(dev, cfb_size);
+	}
+
+	return 0;
+}
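i915_gem_init_stolen() above sizes the FBC compressed buffer as 32M when more than 36M of stolen memory is available, and as 7/8 of the stolen space otherwise (leaving roughly 1M for the line-length buffer and misc). A standalone sketch of that arithmetic, with made-up stolen sizes purely for illustration:

#include <stdio.h>

/* Mirrors the cfb_size selection in i915_gem_init_stolen() above. */
static unsigned long cfb_size_for(unsigned long stolen)
{
	if (stolen > 36UL * 1024 * 1024)
		return 32UL * 1024 * 1024;	/* try for a 32M buffer */
	return stolen * 7 / 8;			/* else 7/8 of stolen space */
}

int main(void)
{
	const unsigned long stolen_sizes[] = { 32UL << 20, 64UL << 20, 128UL << 20 };

	for (unsigned i = 0; i < 3; i++)
		printf("stolen %4luM -> cfb %3luM\n",
		       stolen_sizes[i] >> 20, cfb_size_for(stolen_sizes[i]) >> 20);
	return 0;
}

For a 32M stolen region this yields a 28M compressed buffer; anything above 36M caps out at 32M.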

+ 15 - 3
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
-		 * need to ensure that any fence register is cleared.
+		 * need to ensure that any fence register is updated before
+		 * the next fenced (either through the GTT or by the BLT unit
+		 * on older GPUs) access.
+		 *
+		 * After updating the tiling parameters, we then flag whether
+		 * we need to update an associated fence register. Note this
+		 * has to also include the unfenced register the GPU uses
+		 * whilst executing a fenced command for an untiled object.
 		 */
-		i915_gem_release_mmap(obj);
 
 		obj->map_and_fenceable =
 			obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 
 		if (ret == 0) {
-			obj->tiling_changed = true;
+			obj->fence_dirty =
+				obj->fenced_gpu_access ||
+				obj->fence_reg != I915_FENCE_REG_NONE;
+
 			obj->tiling_mode = args->tiling_mode;
 			obj->stride = args->stride;
+
+			/* Force the fence to be reacquired for GTT access */
+			i915_gem_release_mmap(obj);
 		}
 	}
 	/* we have to maintain this existing ABI... */

+ 187 - 520
drivers/gpu/drm/i915/i915_irq.c

File diff suppressed because it is too large


+ 57 - 13
drivers/gpu/drm/i915/i915_reg.h

@@ -29,6 +29,9 @@
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
@@ -79,6 +82,7 @@
 #define  GRDOM_FULL	(0<<2)
 #define  GRDOM_RENDER	(1<<2)
 #define  GRDOM_MEDIA	(3<<2)
+#define  GRDOM_RESET_ENABLE (1<<0)
 
 #define GEN6_MBCUNIT_SNPCR	0x900c /* for LLC config */
 #define   GEN6_MBC_SNPCR_SHIFT	21
@@ -425,8 +429,6 @@
 #define ARB_MODE		0x04030
 #define   ARB_MODE_SWIZZLE_SNB	(1<<4)
 #define   ARB_MODE_SWIZZLE_IVB	(1<<5)
-#define   ARB_MODE_ENABLE(x)	GFX_MODE_ENABLE(x)
-#define   ARB_MODE_DISABLE(x)	GFX_MODE_DISABLE(x)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
 #define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
 #define DONE_REG		0x40b0
@@ -514,9 +516,6 @@
 #define   GFX_PSMI_GRANULARITY		(1<<10)
 #define   GFX_PPGTT_ENABLE		(1<<9)
 
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -572,7 +571,6 @@
 #define LM_BURST_LENGTH     0x00000700
 #define LM_FIFO_WATERMARK   0x0000001F
 #define MI_ARB_STATE	0x020e4 /* 915+ only */
-#define   MI_ARB_MASK_SHIFT	  16	/* shift for enable bits */
 
 /* Make render/texture TLB fetches lower priorty than associated data
  *   fetches. This is not turned on by default
@@ -637,7 +635,6 @@
 #define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
 
 #define CACHE_MODE_0	0x02120 /* 915+ only */
-#define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
 #define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
@@ -1700,9 +1697,12 @@
 /* Video Data Island Packet control */
 #define VIDEO_DIP_DATA		0x61178
 #define VIDEO_DIP_CTL		0x61170
+/* Pre HSW: */
 #define   VIDEO_DIP_ENABLE		(1 << 31)
 #define   VIDEO_DIP_PORT_B		(1 << 29)
 #define   VIDEO_DIP_PORT_C		(2 << 29)
+#define   VIDEO_DIP_PORT_D		(3 << 29)
+#define   VIDEO_DIP_PORT_MASK		(3 << 29)
 #define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
 #define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
 #define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
@@ -1713,6 +1713,10 @@
 #define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
 #define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
 #define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
+#define   VIDEO_DIP_FREQ_MASK		(3 << 16)
+/* HSW and later: */
+#define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
+#define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
 
 /* Panel power sequencing */
 #define PP_STATUS	0x61200
@@ -2479,7 +2483,8 @@
 
 /* Pipe A */
 #define _PIPEADSL		0x70000
-#define   DSL_LINEMASK		0x00000fff
+#define   DSL_LINEMASK_GEN2	0x00000fff
+#define   DSL_LINEMASK_GEN3	0x00001fff
 #define _PIPEACONF		0x70008
 #define   PIPECONF_ENABLE	(1<<31)
 #define   PIPECONF_DISABLE	0
@@ -3224,11 +3229,14 @@
 #define DE_PCH_EVENT_IVB		(1<<28)
 #define DE_DP_A_HOTPLUG_IVB		(1<<27)
 #define DE_AUX_CHANNEL_A_IVB		(1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB	(1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB		(1<<13)
+#define DE_PIPEC_VBLANK_IVB		(1<<10)
 #define DE_SPRITEB_FLIP_DONE_IVB	(1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
 #define DE_PLANEB_FLIP_DONE_IVB		(1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
 #define DE_PIPEB_VBLANK_IVB		(1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
 #define DE_PIPEA_VBLANK_IVB		(1<<0)
 
 #define VLV_MASTER_IER			0x4400c /* Gunit master IER */
@@ -3402,15 +3410,15 @@
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ?  _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0                0xc6040
 #define  FP_CB_TUNE		(0x3<<22)
 #define _PCH_FPA1                0xc6044
 #define _PCH_FPB0                0xc6048
 #define _PCH_FPB1                0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
@@ -3520,6 +3528,42 @@
 #define VLV_TVIDEO_DIP_GCP(pipe) \
 	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
 
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A		0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A	0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A		0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A	0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A	0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A	0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A		0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A		0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A		0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A		0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A		0x60344
+#define HSW_VIDEO_DIP_GCP_A		0x60210
+
+#define HSW_VIDEO_DIP_CTL_B		0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B	0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B		0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B	0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B	0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B	0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B		0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B		0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B		0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B		0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
+#define HSW_VIDEO_DIP_GCP_B		0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+	 _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+	_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
 #define _TRANS_HTOTAL_B          0xe1000
 #define _TRANS_HBLANK_B          0xe1004
 #define _TRANS_HSYNC_B           0xe1008
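The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() macros added at the top of this file capture the masked-write convention shared by several registers touched in this series (ARB_MODE, GFX_MODE, MI_ARB_STATE, RING_MODE_GEN7): the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, which is why the old read-modify-write sequence in i915_gem_load() collapses into a single write. A small userspace model of that behaviour (masked_write() is only an illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

/* A masked-bit register treats bits 31:16 of a write as a per-bit
 * write enable for bits 15:0. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t gfx_mode = 0;
	const uint32_t ppgtt_enable = 1 << 9;	/* GFX_PPGTT_ENABLE */

	gfx_mode = masked_write(gfx_mode, _MASKED_BIT_ENABLE(ppgtt_enable));
	printf("after enable:  0x%08x\n", gfx_mode);	/* 0x00000200 */

	gfx_mode = masked_write(gfx_mode, _MASKED_BIT_DISABLE(ppgtt_enable));
	printf("after disable: 0x%08x\n", gfx_mode);	/* 0x00000000 */
	return 0;
}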

+ 1 - 7
drivers/gpu/drm/i915/i915_suspend.c

@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 		return false;
 
 	if (HAS_PCH_SPLIT(dev))
-		dpll_reg = PCH_DPLL(pipe);
+		dpll_reg = _PCH_DPLL(pipe);
 	else
 		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
 
@@ -876,12 +876,6 @@ int i915_restore_state(struct drm_device *dev)
 		I915_WRITE(IER, dev_priv->saveIER);
 		I915_WRITE(IMR, dev_priv->saveIMR);
 	}
-	mutex_unlock(&dev->struct_mutex);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_modeset_init_hw(dev);
-
-	mutex_lock(&dev->struct_mutex);
 
 	/* Cache mode state */
 	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

+ 5 - 1
drivers/gpu/drm/i915/intel_crt.c

@@ -615,7 +615,11 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
 				1 << INTEL_ANALOG_CLONE_BIT |
 				1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	crt->base.crtc_mask = (1 << 0) | (1 << 1);
+	if (IS_HASWELL(dev))
+		crt->base.crtc_mask = (1 << 0);
+	else
+		crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
 	if (IS_GEN2(dev))
 		connector->interlace_allowed = 0;
 	else

+ 755 - 0
drivers/gpu/drm/i915/intel_ddi.c

@@ -0,0 +1,755 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+	0x00FFFFFF, 0x0006000E,		/* DP parameters */
+	0x00D75FFF, 0x0005000A,
+	0x00C30FFF, 0x00040006,
+	0x80AAAFFF, 0x000B0000,
+	0x00FFFFFF, 0x0005000A,
+	0x00D75FFF, 0x000C0004,
+	0x80C30FFF, 0x000B0000,
+	0x00FFFFFF, 0x00040006,
+	0x80D75FFF, 0x000B0000,
+	0x00FFFFFF, 0x00040006		/* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+	0x00FFFFFF, 0x0007000E,		/* FDI parameters */
+	0x00D75FFF, 0x000F000A,
+	0x00C30FFF, 0x00060006,
+	0x00AAAFFF, 0x001E0000,
+	0x00FFFFFF, 0x000F000A,
+	0x00D75FFF, 0x00160004,
+	0x00C30FFF, 0x001E0000,
+	0x00FFFFFF, 0x00060006,
+	0x00D75FFF, 0x001E0000,
+	0x00FFFFFF, 0x00040006		/* HDMI parameters */
+};
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+	int i;
+	const u32 *ddi_translations = ((use_fdi_mode) ?
+		hsw_ddi_translations_fdi :
+		hsw_ddi_translations_dp);
+
+	DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+			port_name(port),
+			use_fdi_mode ? "FDI" : "DP");
+
+	WARN((use_fdi_mode && (port != PORT_E)),
+		"Programming port %c in FDI mode, this probably will not work.\n",
+		port_name(port));
+
+	for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+		I915_WRITE(reg, ddi_translations[i]);
+		reg += 4;
+	}
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+	int port;
+
+	if (IS_HASWELL(dev)) {
+		for (port = PORT_A; port < PORT_E; port++)
+			intel_prepare_ddi_buffers(dev, port, false);
+
+		/* DDI E is the suggested one to work in FDI mode, so program is as such by
+		 * default. It will have to be re-programmed in case a digital DP output
+		 * will be detected on it
+		 */
+		intel_prepare_ddi_buffers(dev, PORT_E, true);
+	}
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+	DDI_BUF_EMP_400MV_0DB_HSW,
+	DDI_BUF_EMP_400MV_3_5DB_HSW,
+	DDI_BUF_EMP_400MV_6DB_HSW,
+	DDI_BUF_EMP_400MV_9_5DB_HSW,
+	DDI_BUF_EMP_600MV_0DB_HSW,
+	DDI_BUF_EMP_600MV_3_5DB_HSW,
+	DDI_BUF_EMP_600MV_6DB_HSW,
+	DDI_BUF_EMP_800MV_0DB_HSW,
+	DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, i;
+
+	/* Configure CPU PLL, wait for warmup */
+	I915_WRITE(SPLL_CTL,
+			SPLL_PLL_ENABLE |
+			SPLL_PLL_FREQ_1350MHz |
+			SPLL_PLL_SCC);
+
+	/* Use SPLL to drive the output when in FDI mode */
+	I915_WRITE(PORT_CLK_SEL(PORT_E),
+			PORT_CLK_SEL_SPLL);
+	I915_WRITE(PIPE_CLK_SEL(pipe),
+			PIPE_CLK_SEL_PORT(PORT_E));
+
+	udelay(20);
+
+	/* Start the training iterating through available voltages and emphasis */
+	for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+		/* Configure DP_TP_CTL with auto-training */
+		I915_WRITE(DP_TP_CTL(PORT_E),
+					DP_TP_CTL_FDI_AUTOTRAIN |
+					DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+					DP_TP_CTL_LINK_TRAIN_PAT1 |
+					DP_TP_CTL_ENABLE);
+
+		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp = (temp & ~DDI_BUF_EMP_MASK);
+		I915_WRITE(DDI_BUF_CTL(PORT_E),
+				temp |
+				DDI_BUF_CTL_ENABLE |
+				DDI_PORT_WIDTH_X2 |
+				hsw_ddi_buf_ctl_values[i]);
+
+		udelay(600);
+
+		/* Enable CPU FDI Receiver with auto-training */
+		reg = FDI_RX_CTL(pipe);
+		I915_WRITE(reg,
+				I915_READ(reg) |
+					FDI_LINK_TRAIN_AUTO |
+					FDI_RX_ENABLE |
+					FDI_LINK_TRAIN_PATTERN_1_CPT |
+					FDI_RX_ENHANCE_FRAME_ENABLE |
+					FDI_PORT_WIDTH_2X_LPT |
+					FDI_RX_PLL_ENABLE);
+		POSTING_READ(reg);
+		udelay(100);
+
+		temp = I915_READ(DP_TP_STATUS(PORT_E));
+		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+			DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+
+			/* Enable normal pixel sending for FDI */
+			I915_WRITE(DP_TP_CTL(PORT_E),
+						DP_TP_CTL_FDI_AUTOTRAIN |
+						DP_TP_CTL_LINK_TRAIN_NORMAL |
+						DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+						DP_TP_CTL_ENABLE);
+
+			/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+			temp = I915_READ(DDI_FUNC_CTL(pipe));
+			temp &= ~PIPE_DDI_PORT_MASK;
+			temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+					PIPE_DDI_MODE_SELECT_FDI |
+					PIPE_DDI_FUNC_ENABLE |
+					PIPE_DDI_PORT_WIDTH_X2;
+			I915_WRITE(DDI_FUNC_CTL(pipe),
+					temp);
+			break;
+		} else {
+			DRM_ERROR("Error training BUF_CTL %d\n", i);
+
+			/* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
+			I915_WRITE(DP_TP_CTL(PORT_E),
+					I915_READ(DP_TP_CTL(PORT_E)) &
+						~DP_TP_CTL_ENABLE);
+			I915_WRITE(FDI_RX_CTL(pipe),
+					I915_READ(FDI_RX_CTL(pipe)) &
+						~FDI_RX_PLL_ENABLE);
+			continue;
+		}
+	}
+
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
+ * the time the output is detected what exactly is on the other end of it. This
+ * function aims at providing support for this detection and proper output
+ * configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+	/* For now, we don't do any proper output detection and assume that we
+	 * handle HDMI only */
+
+	switch(port){
+	case PORT_A:
+		/* We don't handle eDP and DP yet */
+		DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+		break;
+	/* Assume that the  ports B, C and D are working in HDMI mode for now */
+	case PORT_B:
+	case PORT_C:
+	case PORT_D:
+		intel_hdmi_init(dev, DDI_BUF_CTL(port));
+		break;
+	default:
+		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+				port);
+		break;
+	}
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+	u32 clock;
+	u16 p;		/* Post divider */
+	u16 n2;		/* Feedback divider */
+	u16 r2;		/* Reference divider */
+};
+
+/* Table of matching values for WRPLL clocks programming for each frequency */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+	{19750,	38,	25,	18},
+	{20000,	48,	32,	18},
+	{21000,	36,	21,	15},
+	{21912,	42,	29,	17},
+	{22000,	36,	22,	15},
+	{23000,	36,	23,	15},
+	{23500,	40,	40,	23},
+	{23750,	26,	16,	14},
+	{23750,	26,	16,	14},
+	{24000,	36,	24,	15},
+	{25000,	36,	25,	15},
+	{25175,	26,	40,	33},
+	{25200,	30,	21,	15},
+	{26000,	36,	26,	15},
+	{27000,	30,	21,	14},
+	{27027,	18,	100,	111},
+	{27500,	30,	29,	19},
+	{28000,	34,	30,	17},
+	{28320,	26,	30,	22},
+	{28322,	32,	42,	25},
+	{28750,	24,	23,	18},
+	{29000,	30,	29,	18},
+	{29750,	32,	30,	17},
+	{30000,	30,	25,	15},
+	{30750,	30,	41,	24},
+	{31000,	30,	31,	18},
+	{31500,	30,	28,	16},
+	{32000,	30,	32,	18},
+	{32500,	28,	32,	19},
+	{33000,	24,	22,	15},
+	{34000,	28,	30,	17},
+	{35000,	26,	32,	19},
+	{35500,	24,	30,	19},
+	{36000,	26,	26,	15},
+	{36750,	26,	46,	26},
+	{37000,	24,	23,	14},
+	{37762,	22,	40,	26},
+	{37800,	20,	21,	15},
+	{38000,	24,	27,	16},
+	{38250,	24,	34,	20},
+	{39000,	24,	26,	15},
+	{40000,	24,	32,	18},
+	{40500,	20,	21,	14},
+	{40541,	22,	147,	89},
+	{40750,	18,	19,	14},
+	{41000,	16,	17,	14},
+	{41500,	22,	44,	26},
+	{41540,	22,	44,	26},
+	{42000,	18,	21,	15},
+	{42500,	22,	45,	26},
+	{43000,	20,	43,	27},
+	{43163,	20,	24,	15},
+	{44000,	18,	22,	15},
+	{44900,	20,	108,	65},
+	{45000,	20,	25,	15},
+	{45250,	20,	52,	31},
+	{46000,	18,	23,	15},
+	{46750,	20,	45,	26},
+	{47000,	20,	40,	23},
+	{48000,	18,	24,	15},
+	{49000,	18,	49,	30},
+	{49500,	16,	22,	15},
+	{50000,	18,	25,	15},
+	{50500,	18,	32,	19},
+	{51000,	18,	34,	20},
+	{52000,	18,	26,	15},
+	{52406,	14,	34,	25},
+	{53000,	16,	22,	14},
+	{54000,	16,	24,	15},
+	{54054,	16,	173,	108},
+	{54500,	14,	24,	17},
+	{55000,	12,	22,	18},
+	{56000,	14,	45,	31},
+	{56250,	16,	25,	15},
+	{56750,	14,	25,	17},
+	{57000,	16,	27,	16},
+	{58000,	16,	43,	25},
+	{58250,	16,	38,	22},
+	{58750,	16,	40,	23},
+	{59000,	14,	26,	17},
+	{59341,	14,	40,	26},
+	{59400,	16,	44,	25},
+	{60000,	16,	32,	18},
+	{60500,	12,	39,	29},
+	{61000,	14,	49,	31},
+	{62000,	14,	37,	23},
+	{62250,	14,	42,	26},
+	{63000,	12,	21,	15},
+	{63500,	14,	28,	17},
+	{64000,	12,	27,	19},
+	{65000,	14,	32,	19},
+	{65250,	12,	29,	20},
+	{65500,	12,	32,	22},
+	{66000,	12,	22,	15},
+	{66667,	14,	38,	22},
+	{66750,	10,	21,	17},
+	{67000,	14,	33,	19},
+	{67750,	14,	58,	33},
+	{68000,	14,	30,	17},
+	{68179,	14,	46,	26},
+	{68250,	14,	46,	26},
+	{69000,	12,	23,	15},
+	{70000,	12,	28,	18},
+	{71000,	12,	30,	19},
+	{72000,	12,	24,	15},
+	{73000,	10,	23,	17},
+	{74000,	12,	23,	14},
+	{74176,	8,	100,	91},
+	{74250,	10,	22,	16},
+	{74481,	12,	43,	26},
+	{74500,	10,	29,	21},
+	{75000,	12,	25,	15},
+	{75250,	10,	39,	28},
+	{76000,	12,	27,	16},
+	{77000,	12,	53,	31},
+	{78000,	12,	26,	15},
+	{78750,	12,	28,	16},
+	{79000,	10,	38,	26},
+	{79500,	10,	28,	19},
+	{80000,	12,	32,	18},
+	{81000,	10,	21,	14},
+	{81081,	6,	100,	111},
+	{81624,	8,	29,	24},
+	{82000,	8,	17,	14},
+	{83000,	10,	40,	26},
+	{83950,	10,	28,	18},
+	{84000,	10,	28,	18},
+	{84750,	6,	16,	17},
+	{85000,	6,	17,	18},
+	{85250,	10,	30,	19},
+	{85750,	10,	27,	17},
+	{86000,	10,	43,	27},
+	{87000,	10,	29,	18},
+	{88000,	10,	44,	27},
+	{88500,	10,	41,	25},
+	{89000,	10,	28,	17},
+	{89012,	6,	90,	91},
+	{89100,	10,	33,	20},
+	{90000,	10,	25,	15},
+	{91000,	10,	32,	19},
+	{92000,	10,	46,	27},
+	{93000,	10,	31,	18},
+	{94000,	10,	40,	23},
+	{94500,	10,	28,	16},
+	{95000,	10,	44,	25},
+	{95654,	10,	39,	22},
+	{95750,	10,	39,	22},
+	{96000,	10,	32,	18},
+	{97000,	8,	23,	16},
+	{97750,	8,	42,	29},
+	{98000,	8,	45,	31},
+	{99000,	8,	22,	15},
+	{99750,	8,	34,	23},
+	{100000,	6,	20,	18},
+	{100500,	6,	19,	17},
+	{101000,	6,	37,	33},
+	{101250,	8,	21,	14},
+	{102000,	6,	17,	15},
+	{102250,	6,	25,	22},
+	{103000,	8,	29,	19},
+	{104000,	8,	37,	24},
+	{105000,	8,	28,	18},
+	{106000,	8,	22,	14},
+	{107000,	8,	46,	29},
+	{107214,	8,	27,	17},
+	{108000,	8,	24,	15},
+	{108108,	8,	173,	108},
+	{109000,	6,	23,	19},
+	{109000,	6,	23,	19},
+	{110000,	6,	22,	18},
+	{110013,	6,	22,	18},
+	{110250,	8,	49,	30},
+	{110500,	8,	36,	22},
+	{111000,	8,	23,	14},
+	{111264,	8,	150,	91},
+	{111375,	8,	33,	20},
+	{112000,	8,	63,	38},
+	{112500,	8,	25,	15},
+	{113100,	8,	57,	34},
+	{113309,	8,	42,	25},
+	{114000,	8,	27,	16},
+	{115000,	6,	23,	18},
+	{116000,	8,	43,	25},
+	{117000,	8,	26,	15},
+	{117500,	8,	40,	23},
+	{118000,	6,	38,	29},
+	{119000,	8,	30,	17},
+	{119500,	8,	46,	26},
+	{119651,	8,	39,	22},
+	{120000,	8,	32,	18},
+	{121000,	6,	39,	29},
+	{121250,	6,	31,	23},
+	{121750,	6,	23,	17},
+	{122000,	6,	42,	31},
+	{122614,	6,	30,	22},
+	{123000,	6,	41,	30},
+	{123379,	6,	37,	27},
+	{124000,	6,	51,	37},
+	{125000,	6,	25,	18},
+	{125250,	4,	13,	14},
+	{125750,	4,	27,	29},
+	{126000,	6,	21,	15},
+	{127000,	6,	24,	17},
+	{127250,	6,	41,	29},
+	{128000,	6,	27,	19},
+	{129000,	6,	43,	30},
+	{129859,	4,	25,	26},
+	{130000,	6,	26,	18},
+	{130250,	6,	42,	29},
+	{131000,	6,	32,	22},
+	{131500,	6,	38,	26},
+	{131850,	6,	41,	28},
+	{132000,	6,	22,	15},
+	{132750,	6,	28,	19},
+	{133000,	6,	34,	23},
+	{133330,	6,	37,	25},
+	{134000,	6,	61,	41},
+	{135000,	6,	21,	14},
+	{135250,	6,	167,	111},
+	{136000,	6,	62,	41},
+	{137000,	6,	35,	23},
+	{138000,	6,	23,	15},
+	{138500,	6,	40,	26},
+	{138750,	6,	37,	24},
+	{139000,	6,	34,	22},
+	{139050,	6,	34,	22},
+	{139054,	6,	34,	22},
+	{140000,	6,	28,	18},
+	{141000,	6,	36,	23},
+	{141500,	6,	22,	14},
+	{142000,	6,	30,	19},
+	{143000,	6,	27,	17},
+	{143472,	4,	17,	16},
+	{144000,	6,	24,	15},
+	{145000,	6,	29,	18},
+	{146000,	6,	47,	29},
+	{146250,	6,	26,	16},
+	{147000,	6,	49,	30},
+	{147891,	6,	23,	14},
+	{148000,	6,	23,	14},
+	{148250,	6,	28,	17},
+	{148352,	4,	100,	91},
+	{148500,	6,	33,	20},
+	{149000,	6,	48,	29},
+	{150000,	6,	25,	15},
+	{151000,	4,	19,	17},
+	{152000,	6,	27,	16},
+	{152280,	6,	44,	26},
+	{153000,	6,	34,	20},
+	{154000,	6,	53,	31},
+	{155000,	6,	31,	18},
+	{155250,	6,	50,	29},
+	{155750,	6,	45,	26},
+	{156000,	6,	26,	15},
+	{157000,	6,	61,	35},
+	{157500,	6,	28,	16},
+	{158000,	6,	65,	37},
+	{158250,	6,	44,	25},
+	{159000,	6,	53,	30},
+	{159500,	6,	39,	22},
+	{160000,	6,	32,	18},
+	{161000,	4,	31,	26},
+	{162000,	4,	18,	15},
+	{162162,	4,	131,	109},
+	{162500,	4,	53,	44},
+	{163000,	4,	29,	24},
+	{164000,	4,	17,	14},
+	{165000,	4,	22,	18},
+	{166000,	4,	32,	26},
+	{167000,	4,	26,	21},
+	{168000,	4,	46,	37},
+	{169000,	4,	104,	83},
+	{169128,	4,	64,	51},
+	{169500,	4,	39,	31},
+	{170000,	4,	34,	27},
+	{171000,	4,	19,	15},
+	{172000,	4,	51,	40},
+	{172750,	4,	32,	25},
+	{172800,	4,	32,	25},
+	{173000,	4,	41,	32},
+	{174000,	4,	49,	38},
+	{174787,	4,	22,	17},
+	{175000,	4,	35,	27},
+	{176000,	4,	30,	23},
+	{177000,	4,	38,	29},
+	{178000,	4,	29,	22},
+	{178500,	4,	37,	28},
+	{179000,	4,	53,	40},
+	{179500,	4,	73,	55},
+	{180000,	4,	20,	15},
+	{181000,	4,	55,	41},
+	{182000,	4,	31,	23},
+	{183000,	4,	42,	31},
+	{184000,	4,	30,	22},
+	{184750,	4,	26,	19},
+	{185000,	4,	37,	27},
+	{186000,	4,	51,	37},
+	{187000,	4,	36,	26},
+	{188000,	4,	32,	23},
+	{189000,	4,	21,	15},
+	{190000,	4,	38,	27},
+	{190960,	4,	41,	29},
+	{191000,	4,	41,	29},
+	{192000,	4,	27,	19},
+	{192250,	4,	37,	26},
+	{193000,	4,	20,	14},
+	{193250,	4,	53,	37},
+	{194000,	4,	23,	16},
+	{194208,	4,	23,	16},
+	{195000,	4,	26,	18},
+	{196000,	4,	45,	31},
+	{197000,	4,	35,	24},
+	{197750,	4,	41,	28},
+	{198000,	4,	22,	15},
+	{198500,	4,	25,	17},
+	{199000,	4,	28,	19},
+	{200000,	4,	37,	25},
+	{201000,	4,	61,	41},
+	{202000,	4,	112,	75},
+	{202500,	4,	21,	14},
+	{203000,	4,	146,	97},
+	{204000,	4,	62,	41},
+	{204750,	4,	44,	29},
+	{205000,	4,	38,	25},
+	{206000,	4,	29,	19},
+	{207000,	4,	23,	15},
+	{207500,	4,	40,	26},
+	{208000,	4,	37,	24},
+	{208900,	4,	48,	31},
+	{209000,	4,	48,	31},
+	{209250,	4,	31,	20},
+	{210000,	4,	28,	18},
+	{211000,	4,	25,	16},
+	{212000,	4,	22,	14},
+	{213000,	4,	30,	19},
+	{213750,	4,	38,	24},
+	{214000,	4,	46,	29},
+	{214750,	4,	35,	22},
+	{215000,	4,	43,	27},
+	{216000,	4,	24,	15},
+	{217000,	4,	37,	23},
+	{218000,	4,	42,	26},
+	{218250,	4,	42,	26},
+	{218750,	4,	34,	21},
+	{219000,	4,	47,	29},
+	{219000,	4,	47,	29},
+	{220000,	4,	44,	27},
+	{220640,	4,	49,	30},
+	{220750,	4,	36,	22},
+	{221000,	4,	36,	22},
+	{222000,	4,	23,	14},
+	{222525,	4,	28,	17},
+	{222750,	4,	33,	20},
+	{227000,	4,	37,	22},
+	{230250,	4,	29,	17},
+	{233500,	4,	38,	22},
+	{235000,	4,	40,	23},
+	{238000,	4,	30,	17},
+	{241500,	2,	17,	19},
+	{245250,	2,	20,	22},
+	{247750,	2,	22,	24},
+	{253250,	2,	15,	16},
+	{256250,	2,	18,	19},
+	{262500,	2,	31,	32},
+	{267250,	2,	66,	67},
+	{268500,	2,	94,	95},
+	{270000,	2,	14,	14},
+	{272500,	2,	77,	76},
+	{273750,	2,	57,	56},
+	{280750,	2,	24,	23},
+	{281250,	2,	23,	22},
+	{286000,	2,	17,	16},
+	{291750,	2,	26,	24},
+	{296703,	2,	56,	51},
+	{297000,	2,	22,	20},
+	{298000,	2,	21,	19},
+};
+
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	int port = intel_hdmi->ddi_port;
+	int pipe = intel_crtc->pipe;
+	int p, n2, r2, valid=0;
+	u32 temp, i;
+
+	/* On Haswell, we need to enable the clocks and prepare DDI function to
+	 * work in HDMI mode for this pipe.
+	 */
+	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+
+	for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
+		if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
+			p = wrpll_tmds_clock_table[i].p;
+			n2 = wrpll_tmds_clock_table[i].n2;
+			r2 = wrpll_tmds_clock_table[i].r2;
+
+			DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
+					crtc->mode.clock,
+					p, n2, r2);
+
+			valid = 1;
+			break;
+		}
+	}
+
+	if (!valid) {
+		DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
+				crtc->mode.clock);
+		return;
+	}
+
+	/* Enable LCPLL if disabled */
+	temp = I915_READ(LCPLL_CTL);
+	if (temp & LCPLL_PLL_DISABLE)
+		I915_WRITE(LCPLL_CTL,
+				temp & ~LCPLL_PLL_DISABLE);
+
+	/* Configure WR PLL 1, program the correct divider values for
+	 * the desired frequency and wait for warmup */
+	I915_WRITE(WRPLL_CTL1,
+			WRPLL_PLL_ENABLE |
+			WRPLL_PLL_SELECT_LCPLL_2700 |
+			WRPLL_DIVIDER_REFERENCE(r2) |
+			WRPLL_DIVIDER_FEEDBACK(n2) |
+			WRPLL_DIVIDER_POST(p));
+
+	udelay(20);
+
+	/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+	 * this port for connection.
+	 */
+	I915_WRITE(PORT_CLK_SEL(port),
+			PORT_CLK_SEL_WRPLL1);
+	I915_WRITE(PIPE_CLK_SEL(pipe),
+			PIPE_CLK_SEL_PORT(port));
+
+	udelay(20);
+
+	if (intel_hdmi->has_audio) {
+		/* Proper support for digital audio needs a new logic and a new set
+		 * of registers, so we leave it for future patch bombing.
+		 */
+		DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+				 pipe_name(intel_crtc->pipe));
+	}
+
+	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+	temp = I915_READ(DDI_FUNC_CTL(pipe));
+	temp &= ~PIPE_DDI_PORT_MASK;
+	temp &= ~PIPE_DDI_BPC_12;
+	temp |= PIPE_DDI_SELECT_PORT(port) |
+			PIPE_DDI_MODE_SELECT_HDMI |
+			((intel_crtc->bpp > 24) ?
+				PIPE_DDI_BPC_12 :
+				PIPE_DDI_BPC_8) |
+			PIPE_DDI_FUNC_ENABLE;
+
+	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	int port = intel_hdmi->ddi_port;
+	u32 temp;
+
+	temp = I915_READ(DDI_BUF_CTL(port));
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		temp &= ~DDI_BUF_CTL_ENABLE;
+	} else {
+		temp |= DDI_BUF_CTL_ENABLE;
+	}
+
+	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
+	 * and swing/emphasis values are ignored so nothing special needs
+	 * to be done besides enabling the port.
+	 */
+	I915_WRITE(DDI_BUF_CTL(port),
+			temp);
+}
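The wrpll_tmds_clock_table above pairs each TMDS pixel clock (in kHz) with post (p), feedback (n2) and reference (r2) dividers for WR PLL 1, which intel_ddi_mode_set() runs from the 2700 MHz LCPLL reference (WRPLL_PLL_SELECT_LCPLL_2700). The entries are consistent with the PLL output being five times the pixel clock, i.e. 2700 MHz * n2 / r2 / p ≈ 5 * clock; that relationship is inferred from the table, not stated in the patch, so the check below is only a sanity sketch over a few sample rows:

#include <stdio.h>

struct wrpll_tmds_clock { unsigned clock, p, n2, r2; };	/* clock in kHz */

static const struct wrpll_tmds_clock samples[] = {
	{  25175, 26, 40, 33 },
	{  74250, 10, 22, 16 },
	{ 148500,  6, 33, 20 },
	{ 297000,  2, 22, 20 },
};

int main(void)
{
	const double ref_khz = 2700000.0;	/* LCPLL 2700 MHz reference */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		const struct wrpll_tmds_clock *c = &samples[i];
		double pll = ref_khz * c->n2 / c->r2 / c->p;

		printf("%6u kHz: ref*n2/r2/p = %9.1f kHz (5*clock = %u)\n",
		       c->clock, pll, 5 * c->clock);
	}
	return 0;
}

For 148500 kHz, for instance, 2700000 * 33 / 20 / 6 = 742500 kHz, exactly five times the pixel clock.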

+ 479 - 164
drivers/gpu/drm/i915/intel_display.c

File diff suppressed because it is too large


+ 13 - 6
drivers/gpu/drm/i915/intel_dp.c

@@ -688,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-	int bpp;
+	int bpp, mode_rate;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +702,30 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	DRM_DEBUG_KMS("DP link computation with max lane count %i "
+		      "max bw %02x pixel clock %iKHz\n",
+		      max_lane_count, bws[max_clock], mode->clock);
+
 	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
 		return false;
 
 	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	mode_rate = intel_dp_link_required(mode->clock, bpp);
 
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-			if (intel_dp_link_required(mode->clock, bpp)
-					<= link_avail) {
+			if (mode_rate <= link_avail) {
 				intel_dp->link_bw = bws[clock];
 				intel_dp->lane_count = lane_count;
 				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-				DRM_DEBUG_KMS("Display port link bw %02x lane "
-						"count %d clock %d\n",
+				DRM_DEBUG_KMS("DP link bw %02x lane "
+						"count %d clock %d bpp %d\n",
 				       intel_dp->link_bw, intel_dp->lane_count,
-				       adjusted_mode->clock);
+				       adjusted_mode->clock, bpp);
+				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+					      mode_rate, link_avail);
 				return true;
 			}
 		}
@@ -2439,6 +2445,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	}
 
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
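The reworked intel_dp_mode_fixup() above computes mode_rate once from the pixel clock and bpp and logs it against the per-configuration link_avail. The helpers themselves (intel_dp_link_required(), intel_dp_max_data_rate()) are not shown in this diff, so the standalone sketch below only illustrates the usual DisplayPort budget they are assumed to implement: after 8b/10b coding each lane moves 8 data bits per link symbol, and the loop above prefers the fewest lanes that fit.

#include <stdio.h>

/* Assumed DisplayPort accounting: link_rate is the symbol rate in kHz
 * and each symbol carries 8 data bits after 8b/10b coding. */
static long dp_available_kbps(long link_rate_khz, int lanes)
{
	return link_rate_khz * lanes * 8;
}

static long dp_required_kbps(long pixel_clock_khz, int bpp)
{
	return pixel_clock_khz * bpp;
}

int main(void)
{
	const long pixel_clock = 148500;		/* 1920x1080@60, kHz */
	const int bpp = 24;
	const long rates[] = { 162000, 270000 };	/* DP_LINK_BW_1_62, DP_LINK_BW_2_7 link clocks, kHz */

	for (int lanes = 1; lanes <= 4; lanes <<= 1)
		for (int r = 0; r < 2; r++) {
			long need = dp_required_kbps(pixel_clock, bpp);
			long have = dp_available_kbps(rates[r], lanes);

			printf("%d lane(s) @ %6ld: need %8ld have %8ld kbit/s -> %s\n",
			       lanes, rates[r], need, have,
			       need <= have ? "fits" : "too slow");
		}
	return 0;
}

With these numbers the first combination that fits is 2 lanes at the 2.7 GHz link rate, matching the lane-count-outer, clock-inner search order in the hunk above.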
 

+ 46 - 11
drivers/gpu/drm/i915/intel_drv.h

@@ -183,8 +183,8 @@ struct intel_crtc {
 	bool cursor_visible;
 	unsigned int bpp;
 
-	bool no_pll; /* tertiary pipe for IVB */
-	bool use_pll_a;
+	/* We can share PLLs across outputs if the timings match */
+	struct intel_pch_pll *pch_pll;
 };
 
 struct intel_plane {
@@ -238,6 +238,8 @@ struct cxsr_latency {
 #define DIP_TYPE_AVI    0x82
 #define DIP_VERSION_AVI 0x2
 #define DIP_LEN_AVI     13
+#define DIP_AVI_PR_1    0
+#define DIP_AVI_PR_2    1
 
 #define DIP_TYPE_SPD	0x83
 #define DIP_VERSION_SPD	0x1
@@ -271,23 +273,36 @@ struct dip_infoframe {
 			uint8_t ITC_EC_Q_SC;
 			/* PB4 - VIC 6:0 */
 			uint8_t VIC;
-			/* PB5 - PR 3:0 */
-			uint8_t PR;
+			/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+			uint8_t YQ_CN_PR;
 			/* PB6 to PB13 */
 			uint16_t top_bar_end;
 			uint16_t bottom_bar_start;
 			uint16_t left_bar_end;
 			uint16_t right_bar_start;
-		} avi;
+		} __attribute__ ((packed)) avi;
 		struct {
 			uint8_t vn[8];
 			uint8_t pd[16];
 			uint8_t sdi;
-		} spd;
+		} __attribute__ ((packed)) spd;
 		uint8_t payload[27];
 	} __attribute__ ((packed)) body;
 } __attribute__((packed));
 
+struct intel_hdmi {
+	struct intel_encoder base;
+	u32 sdvox_reg;
+	int ddc_bus;
+	int ddi_port;
+	uint32_t color_range;
+	bool has_hdmi_sink;
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	void (*write_infoframe)(struct drm_encoder *encoder,
+				struct dip_infoframe *frame);
+};
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -327,7 +342,11 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+			    struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
@@ -346,6 +365,8 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 
+void intel_sanitize_pm(struct drm_device *dev);
+
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
@@ -405,10 +426,6 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
 extern void ironlake_disable_rc6(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_i915_gem_object *obj,
@@ -446,12 +463,17 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
 			    struct drm_display_mode *mode);
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
 /* For use by IVB LP watermark workaround in intel_sprite.c */
 extern void intel_update_watermarks(struct drm_device *dev);
 extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 					   uint32_t sprite_width,
 					   int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+			 struct drm_display_mode *mode);
 
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
@@ -466,5 +488,18 @@ extern void intel_init_pm(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
 
 #endif /* __INTEL_DRV_H__ */
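Since struct intel_hdmi now lives in this header and enc_to_intel_hdmi() is exported for the new DDI/Haswell paths, the accessor is just the usual container_of idiom (visible in the intel_hdmi.c hunk below). A minimal standalone sketch of that idiom, using stand-in types and a simplified macro rather than the real drm_encoder/intel_hdmi definitions:

	/* Illustration only; simplified stand-ins, not the kernel definitions. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fake_encoder { int id; };	/* stands in for drm_encoder */

	struct fake_hdmi {			/* stands in for intel_hdmi  */
		struct fake_encoder base;
		int ddi_port;
	};

	static struct fake_hdmi *to_fake_hdmi(struct fake_encoder *encoder)
	{
		/* Recover the containing object from a pointer to its member. */
		return container_of(encoder, struct fake_hdmi, base);
	}

	int main(void)
	{
		struct fake_hdmi hdmi = { .base = { .id = 1 }, .ddi_port = 3 };

		printf("ddi_port = %d\n", to_fake_hdmi(&hdmi.base)->ddi_port);
		return 0;
	}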

+ 213 - 71
drivers/gpu/drm/i915/intel_hdmi.c

@@ -37,19 +37,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-struct intel_hdmi {
-	struct intel_encoder base;
-	u32 sdvox_reg;
-	int ddc_bus;
-	uint32_t color_range;
-	bool has_hdmi_sink;
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	void (*write_infoframe)(struct drm_encoder *encoder,
-				struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
 	return container_of(encoder, struct intel_hdmi, base.base);
 }
@@ -75,107 +63,182 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
 	frame->checksum = 0x100 - sum;
 }
 
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
 {
-	u32 flags = 0;
-
 	switch (frame->type) {
 	case DIP_TYPE_AVI:
-		flags |= VIDEO_DIP_SELECT_AVI;
-		break;
+		return VIDEO_DIP_SELECT_AVI;
 	case DIP_TYPE_SPD:
-		flags |= VIDEO_DIP_SELECT_SPD;
-		break;
+		return VIDEO_DIP_SELECT_SPD;
 	default:
 		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
-		break;
+		return 0;
 	}
-
-	return flags;
 }
 
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
 {
-	u32 flags = 0;
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
 
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
 	switch (frame->type) {
 	case DIP_TYPE_AVI:
-		flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
-		break;
+		return VIDEO_DIP_ENABLE_AVI_HSW;
 	case DIP_TYPE_SPD:
-		flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
-		break;
+		return VIDEO_DIP_ENABLE_SPD_HSW;
 	default:
 		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
-		break;
+		return 0;
 	}
+}
 
-	return flags;
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+	case DIP_TYPE_SPD:
+		return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
 }
 
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
-				 struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+	u32 val = I915_READ(VIDEO_DIP_CTL);
 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
 
-
-	/* XXX first guess at handling video port, is this corrent? */
+	val &= ~VIDEO_DIP_PORT_MASK;
 	if (intel_hdmi->sdvox_reg == SDVOB)
-		port = VIDEO_DIP_PORT_B;
+		val |= VIDEO_DIP_PORT_B;
 	else if (intel_hdmi->sdvox_reg == SDVOC)
-		port = VIDEO_DIP_PORT_C;
+		val |= VIDEO_DIP_PORT_C;
 	else
 		return;
 
-	flags = intel_infoframe_index(frame);
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
 
-	val &= ~VIDEO_DIP_SELECT_MASK;
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
 
-	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, val);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(VIDEO_DIP_DATA, *data);
 		data++;
 	}
 
-	flags |= intel_infoframe_flags(frame);
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, val);
 }
 
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
-				     struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
 {
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
-	u32 flags, val = I915_READ(reg);
+	u32 val = I915_READ(reg);
+
+	val &= ~VIDEO_DIP_PORT_MASK;
+	switch (intel_hdmi->sdvox_reg) {
+	case HDMIB:
+		val |= VIDEO_DIP_PORT_B;
+		break;
+	case HDMIC:
+		val |= VIDEO_DIP_PORT_C;
+		break;
+	case HDMID:
+		val |= VIDEO_DIP_PORT_D;
+		break;
+	default:
+		return;
+	}
 
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	flags = intel_infoframe_index(frame);
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	/* The DIP control register spec says that we need to update the AVI
+	 * infoframe without clearing its enable bit */
+	if (frame->type == DIP_TYPE_AVI)
+		val |= VIDEO_DIP_ENABLE_AVI;
+	else
+		val &= ~g4x_infoframe_enable(frame);
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	val |= VIDEO_DIP_ENABLE;
+
+	I915_WRITE(reg, val);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
 
-	flags |= intel_infoframe_flags(frame);
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	I915_WRITE(reg, val);
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -184,28 +247,60 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 	uint32_t *data = (uint32_t *)frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	unsigned i, len = DIP_HEADER_SIZE + frame->len;
-	u32 flags, val = I915_READ(reg);
+	u32 val = I915_READ(reg);
 
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	flags = intel_infoframe_index(frame);
-
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+	val |= VIDEO_DIP_ENABLE;
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	I915_WRITE(reg, val);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
 
-	flags |= intel_infoframe_flags(frame);
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+	I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+	unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(ctl_reg);
+
+	if (data_reg == 0)
+		return;
+
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	val &= ~hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
+
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(data_reg + i, *data);
+		data++;
+	}
+
+	val |= hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
 }
 
 static void intel_set_infoframe(struct drm_encoder *encoder,
@@ -220,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
 	intel_hdmi->write_infoframe(encoder, frame);
 }
 
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+					 struct drm_display_mode *adjusted_mode)
 {
 	struct dip_infoframe avi_if = {
 		.type = DIP_TYPE_AVI,
@@ -228,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
 		.len = DIP_LEN_AVI,
 	};
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
 	intel_set_infoframe(encoder, &avi_if);
 }
 
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
 {
 	struct dip_infoframe spd_if;
 
@@ -252,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 sdvox;
 
@@ -290,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
 	POSTING_READ(intel_hdmi->sdvox_reg);
 
-	intel_hdmi_set_avi_infoframe(encoder);
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
 	intel_hdmi_set_spd_infoframe(encoder);
 }
 
@@ -427,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
 
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
-		      struct drm_property *property,
-		      uint64_t val)
+			struct drm_property *property,
+			uint64_t val)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -487,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
 
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+	.dpms = intel_ddi_dpms,
+	.mode_fixup = intel_hdmi_mode_fixup,
+	.prepare = intel_encoder_prepare,
+	.mode_set = intel_ddi_mode_set,
+	.commit = intel_encoder_commit,
+};
+
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
 	.dpms = intel_hdmi_dpms,
 	.mode_fixup = intel_hdmi_mode_fixup,
@@ -576,24 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+		intel_hdmi->ddi_port = PORT_B;
+		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+		intel_hdmi->ddi_port = PORT_C;
+		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+	} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+		DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+		intel_hdmi->ddi_port = PORT_D;
+		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+	} else {
+		/* If we got an unknown sdvox_reg, things are pretty much broken
+		 * in a way that we should let the kernel know about it */
+		BUG();
 	}
 
 	intel_hdmi->sdvox_reg = sdvox_reg;
 
 	if (!HAS_PCH_SPLIT(dev)) {
-		intel_hdmi->write_infoframe = i9xx_write_infoframe;
+		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		I915_WRITE(VIDEO_DIP_CTL, 0);
 	} else if (IS_VALLEYVIEW(dev)) {
 		intel_hdmi->write_infoframe = vlv_write_infoframe;
 		for_each_pipe(i)
 			I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
-	}  else {
-		intel_hdmi->write_infoframe = ironlake_write_infoframe;
+	} else if (IS_HASWELL(dev)) {
+		/* FIXME: Haswell has a new set of DIP frame registers, but we are
+		 * just doing the minimal required for HDMI to work at this stage.
+		 */
+		intel_hdmi->write_infoframe = hsw_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+	} else if (HAS_PCH_IBX(dev)) {
+		intel_hdmi->write_infoframe = ibx_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+	} else {
+		intel_hdmi->write_infoframe = cpt_write_infoframe;
 		for_each_pipe(i)
 			I915_WRITE(TVIDEO_DIP_CTL(i), 0);
 	}
 
-	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+	if (IS_HASWELL(dev))
+		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+	else
+		drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
 	intel_hdmi_add_properties(intel_hdmi, connector);
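
All of the write_infoframe variants above push a frame whose checksum comes from intel_dip_infoframe_csum(); its tail is visible earlier in this hunk (frame->checksum = 0x100 - sum), i.e. the checksum is the byte that brings the sum of all header and payload bytes to zero modulo 256. A small userspace sketch of the same calculation (the sample bytes are made up, not a real AVI frame):

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t infoframe_csum(const uint8_t *bytes, unsigned int len)
	{
		uint8_t sum = 0;
		unsigned int i;

		for (i = 0; i < len; i++)
			sum += bytes[i];	/* wraps mod 256, like the u8 sum above */

		return (uint8_t)(0x100 - sum);
	}

	int main(void)
	{
		uint8_t frame[] = { 0x82, 0x02, 0x0d,		/* type, version, length */
				    0x10, 0x28, 0x00, 0x00 };	/* made-up payload bytes  */
		uint8_t csum = infoframe_csum(frame, sizeof(frame));
		uint8_t total = csum;
		unsigned int i;

		for (i = 0; i < sizeof(frame); i++)
			total += frame[i];	/* including the checksum, sums to 0 */

		printf("checksum = 0x%02x, total mod 256 = %u\n",
		       (unsigned)csum, (unsigned)total);
		return 0;
	}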
 

+ 4 - 0
drivers/gpu/drm/i915/intel_i2c.c

@@ -490,6 +490,10 @@ int intel_setup_gmbus(struct drm_device *dev)
 		/* By default use a conservative clock rate */
 		bus->reg0 = port | GMBUS_RATE_100KHZ;
 
+		/* gmbus seems to be broken on i830 */
+		if (IS_I830(dev))
+			bus->force_bit = true;
+
 		intel_gpio_setup(bus, port);
 	}
 

+ 38 - 29
drivers/gpu/drm/i915/intel_opregion.c

@@ -151,7 +151,7 @@ struct opregion_asle {
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 	u32 max;
 
 	if (!(bclp & ASLE_BCLP_VALID))
@@ -163,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
 	max = intel_panel_get_max_backlight(dev);
 	intel_panel_set_backlight(dev, bclp * max / 255);
-	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
 }
@@ -200,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
 void intel_opregion_asle_intr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 	u32 asle_stat = 0;
 	u32 asle_req;
 
 	if (!asle)
 		return;
 
-	asle_req = asle->aslc & ASLE_REQ_MSK;
+	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
 
 	if (!asle_req) {
 		DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -215,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
 	}
 
 	if (asle_req & ASLE_SET_ALS_ILLUM)
-		asle_stat |= asle_set_als_illum(dev, asle->alsi);
+		asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
 
 	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight(dev, asle->bclp);
+		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
 
 	if (asle_req & ASLE_SET_PFIT)
-		asle_stat |= asle_set_pfit(dev, asle->pfit);
+		asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
 
 	if (asle_req & ASLE_SET_PWM_FREQ)
-		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+		asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
 
-	asle->aslc = asle_stat;
+	iowrite32(asle_stat, &asle->aslc);
 }
 
 void intel_opregion_gse_intr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 	u32 asle_stat = 0;
 	u32 asle_req;
 
 	if (!asle)
 		return;
 
-	asle_req = asle->aslc & ASLE_REQ_MSK;
+	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
 
 	if (!asle_req) {
 		DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -252,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
 	}
 
 	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight(dev, asle->bclp);
+		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
 
 	if (asle_req & ASLE_SET_PFIT) {
 		DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -264,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
 		asle_stat |= ASLE_PWM_FREQ_FAILED;
 	}
 
-	asle->aslc = asle_stat;
+	iowrite32(asle_stat, &asle->aslc);
 }
 #define ASLE_ALS_EN    (1<<0)
 #define ASLE_BLC_EN    (1<<1)
@@ -274,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
 void intel_opregion_enable_asle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 
 	if (asle) {
 		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
 
-		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
-			ASLE_PFMB_EN;
-		asle->ardy = 1;
+		iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+			  ASLE_PFMB_EN,
+			  &asle->tche);
+		iowrite32(1, &asle->ardy);
 	}
 }
 
@@ -300,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
 	   Linux, these are handled by the dock, button and video drivers.
 	*/
 
-	struct opregion_acpi *acpi;
+	struct opregion_acpi __iomem *acpi;
 	struct acpi_bus_event *event = data;
 	int ret = NOTIFY_OK;
 
@@ -312,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
 
 	acpi = system_opregion->acpi;
 
-	if (event->type == 0x80 && !(acpi->cevt & 0x1))
+	if (event->type == 0x80 &&
+	    (ioread32(&acpi->cevt) & 1) == 0)
 		ret = NOTIFY_BAD;
 
-	acpi->csts = 0;
+	iowrite32(0, &acpi->csts);
 
 	return ret;
 }
@@ -339,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
 	unsigned long long device_id;
 	acpi_status status;
+	u32 temp;
 	int i = 0;
 
 	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -373,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
 		if (ACPI_SUCCESS(status)) {
 			if (!device_id)
 				goto blind_set;
-			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+			iowrite32((u32)(device_id & 0x0f0f),
+				  &opregion->acpi->didl[i]);
 			i++;
 		}
 	}
@@ -381,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 end:
 	/* If fewer than 8 outputs, the list must be null terminated */
 	if (i < 8)
-		opregion->acpi->didl[i] = 0;
+		iowrite32(0, &opregion->acpi->didl[i]);
 	return;
 
 blind_set:
@@ -415,7 +419,9 @@ blind_set:
 			output_type = ACPI_LVDS_OUTPUT;
 			break;
 		}
-		opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+		temp = ioread32(&opregion->acpi->didl[i]);
+		iowrite32(temp | (1<<31) | output_type | i,
+			  &opregion->acpi->didl[i]);
 		i++;
 	}
 	goto end;
@@ -436,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
 		/* Notify BIOS we are ready to handle ACPI video ext notifs.
 		 * Right now, all the events are handled by the ACPI video module.
 		 * We don't actually need to do anything with them. */
-		opregion->acpi->csts = 0;
-		opregion->acpi->drdy = 1;
+		iowrite32(0, &opregion->acpi->csts);
+		iowrite32(1, &opregion->acpi->drdy);
 
 		system_opregion = opregion;
 		register_acpi_notifier(&intel_opregion_notifier);
@@ -456,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
 		return;
 
 	if (opregion->acpi) {
-		opregion->acpi->drdy = 0;
+		iowrite32(0, &opregion->acpi->drdy);
 
 		system_opregion = NULL;
 		unregister_acpi_notifier(&intel_opregion_notifier);
@@ -476,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
-	void *base;
+	void __iomem *base;
 	u32 asls, mboxes;
+	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
 
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -491,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
 	if (!base)
 		return -ENOMEM;
 
-	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+	memcpy_fromio(buf, base, sizeof(buf));
+
+	if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
 		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		err = -EINVAL;
 		goto err_out;
@@ -501,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
 
 	opregion->lid_state = base + ACPI_CLID;
 
-	mboxes = opregion->header->mboxes;
+	mboxes = ioread32(&opregion->header->mboxes);
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
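
The opregion conversion above replaces plain dereferences of the mapped ACPI/ASLE mailboxes with ioread32()/iowrite32() and memcpy_fromio() on __iomem pointers, so sparse can verify that mapped memory is only touched through accessors. A rough userspace analogue of that access pattern (the layout and helper names below are stand-ins, not the kernel API):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct fake_asle {		/* simplified stand-in for opregion_asle */
		uint32_t aslc;
		uint32_t bclp;
		uint32_t cblv;
	};

	static uint32_t fake_ioread32(const volatile uint32_t *addr)
	{
		uint32_t val;

		memcpy(&val, (const void *)addr, sizeof(val));
		return val;
	}

	static void fake_iowrite32(uint32_t val, volatile uint32_t *addr)
	{
		memcpy((void *)addr, &val, sizeof(val));
	}

	int main(void)
	{
		static struct fake_asle backing;		/* pretend MMIO window */
		volatile struct fake_asle *asle = &backing;

		/* Fields are only touched through the helpers, never directly. */
		fake_iowrite32(0x80000064, &asle->bclp);
		printf("bclp = 0x%08" PRIx32 "\n", fake_ioread32(&asle->bclp));
		return 0;
	}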

+ 112 - 97
drivers/gpu/drm/i915/intel_overlay.c

@@ -187,14 +187,14 @@ struct intel_overlay {
 	void (*flip_tail)(struct intel_overlay *);
 };
 
-static struct overlay_registers *
+static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
 	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
 					 overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 }
 
 static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-				     struct overlay_registers *regs)
+				     struct overlay_registers __iomem *regs)
 {
 	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 	    kfree(request);
 	    return ret;
 	}
 	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	overlay->last_flip_req = 0;
 	return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
 
 	mode = drm_mode_duplicate(dev, &vesa_640x480);
-	drm_mode_set_crtcinfo(mode, 0);
+
 	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
 				       crtc->base.x, crtc->base.y,
 				       crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	ret = BEGIN_LP_RING(4);
+	ret = intel_ring_begin(ring, 4);
 	if (ret) {
 		kfree(request);
 		goto out;
 	}
 
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	OUT_RING(overlay->flip_addr | OFC_UPDATE);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	OUT_RING(MI_NOOP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = BEGIN_LP_RING(2);
+	ret = intel_ring_begin(ring, 2);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(ring, NULL, request);
 	if (ret) {
 		kfree(request);
 		return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
 	int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 	/* wait for overlay to go idle */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
-	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	OUT_RING(flip_addr);
-	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, request,
 					     intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	if (overlay->last_flip_req == 0)
 		return 0;
 
-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-				true);
+	ret = i915_wait_request(ring, overlay->last_flip_req);
 	if (ret)
 		return ret;
+	i915_gem_retire_requests(dev);
 
 	if (overlay->flip_tail)
 		overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = BEGIN_LP_RING(2);
+		ret = intel_ring_begin(ring, 2);
 		if (ret) {
 			kfree(request);
 			return ret;
 		}
 
-		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		ret = intel_overlay_do_wait_request(overlay, request,
 						    intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
 	0x3000, 0x0800, 0x3000
 };
 
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
 {
-	memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
-	memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+	memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+	memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+		    sizeof(uv_static_hcoeffs));
 }
 
 static bool update_scaling_factors(struct intel_overlay *overlay,
-				   struct overlay_registers *regs,
+				   struct overlay_registers __iomem *regs,
 				   struct put_image_params *params)
 {
 	/* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 	overlay->old_xscale = xscale;
 	overlay->old_yscale = yscale;
 
-	regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
-			   ((xscale >> FP_SHIFT)  << 16) |
-			   ((xscale & FRACT_MASK) << 3));
+	iowrite32(((yscale & FRACT_MASK) << 20) |
+		  ((xscale >> FP_SHIFT)  << 16) |
+		  ((xscale & FRACT_MASK) << 3),
+		 &regs->YRGBSCALE);
 
-	regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
-			 ((xscale_UV >> FP_SHIFT)  << 16) |
-			 ((xscale_UV & FRACT_MASK) << 3));
+	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+		  ((xscale_UV >> FP_SHIFT)  << 16) |
+		  ((xscale_UV & FRACT_MASK) << 3),
+		 &regs->UVSCALE);
 
-	regs->UVSCALEV = ((((yscale    >> FP_SHIFT) << 16) |
-			   ((yscale_UV >> FP_SHIFT) << 0)));
+	iowrite32((((yscale    >> FP_SHIFT) << 16) |
+		   ((yscale_UV >> FP_SHIFT) << 0)),
+		 &regs->UVSCALEV);
 
 	if (scale_changed)
 		update_polyphase_filter(regs);
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 }
 
 static void update_colorkey(struct intel_overlay *overlay,
-			    struct overlay_registers *regs)
+			    struct overlay_registers __iomem *regs)
 {
 	u32 key = overlay->color_key;
 
 	switch (overlay->crtc->base.fb->bits_per_pixel) {
 	case 8:
-		regs->DCLRKV = 0;
-		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+		iowrite32(0, &regs->DCLRKV);
+		iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
 		break;
 
 	case 16:
 		if (overlay->crtc->base.fb->depth == 15) {
-			regs->DCLRKV = RGB15_TO_COLORKEY(key);
-			regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+			iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+			iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+				  &regs->DCLRKM);
 		} else {
-			regs->DCLRKV = RGB16_TO_COLORKEY(key);
-			regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+			iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+			iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+				  &regs->DCLRKM);
 		}
 		break;
 
 	case 24:
 	case 32:
-		regs->DCLRKV = key;
-		regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+		iowrite32(key, &regs->DCLRKV);
+		iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
 		break;
 	}
 }
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 	bool scale_changed = false;
 	struct drm_device *dev = overlay->dev;
+	u32 swidth, swidthsw, sheight, ostride;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 
 	if (!overlay->active) {
+		u32 oconfig;
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
 			ret = -ENOMEM;
 			goto out_unpin;
 		}
-		regs->OCONFIG = OCONF_CC_OUT_8BIT;
+		oconfig = OCONF_CC_OUT_8BIT;
 		if (IS_GEN4(overlay->dev))
-			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
-		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+			oconfig |= OCONF_CSC_MODE_BT709;
+		oconfig |= overlay->crtc->pipe == 0 ?
 			OCONF_PIPE_A : OCONF_PIPE_B;
+		iowrite32(oconfig, &regs->OCONFIG);
 		intel_overlay_unmap_regs(overlay, regs);
 
 		ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 	}
 
-	regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
-	regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
 	if (params->format & I915_OVERLAY_YUV_PACKED)
 		tmp_width = packed_width_bytes(params->format, params->src_w);
 	else
 		tmp_width = params->src_w;
 
-	regs->SWIDTH = params->src_w;
-	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
-				       params->offset_Y, tmp_width);
-	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
-	regs->OSTRIDE = params->stride_Y;
+	swidth = params->src_w;
+	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+	sheight = params->src_h;
+	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
 		int uv_hscale = uv_hsubsampling(params->format);
 		int uv_vscale = uv_vsubsampling(params->format);
 		u32 tmp_U, tmp_V;
-		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+		swidth |= (params->src_w/uv_hscale) << 16;
 		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
 				      params->src_w/uv_hscale);
 		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
 				      params->src_w/uv_hscale);
-		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
-		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
-		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
-		regs->OSTRIDE |= params->stride_UV << 16;
+		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+		sheight |= (params->src_h/uv_vscale) << 16;
+		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+		ostride |= params->stride_UV << 16;
 	}
 
+	iowrite32(swidth, &regs->SWIDTH);
+	iowrite32(swidthsw, &regs->SWIDTHSW);
+	iowrite32(sheight, &regs->SHEIGHT);
+	iowrite32(ostride, &regs->OSTRIDE);
+
 	scale_changed = update_scaling_factors(overlay, regs, params);
 
 	update_colorkey(overlay, regs);
 
-	regs->OCMD = overlay_cmd_reg(params);
+	iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
 	intel_overlay_unmap_regs(overlay, regs);
 
@@ -860,7 +879,7 @@ out_unpin:
 
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 	struct drm_device *dev = overlay->dev;
 	int ret;
 
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 		return ret;
 
 	regs = intel_overlay_map_regs(overlay);
-	regs->OCMD = 0;
+	iowrite32(0, &regs->OCMD);
 	intel_overlay_unmap_regs(overlay, regs);
 
 	ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	struct put_image_params *params;
 	int ret;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
 	overlay = dev_priv->overlay;
 	if (!overlay) {
 		DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
 }
 
 static void update_reg_attrs(struct intel_overlay *overlay,
-			     struct overlay_registers *regs)
+			     struct overlay_registers __iomem *regs)
 {
-	regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
-	regs->OCLRC1 = overlay->saturation;
+	iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+		  &regs->OCLRC0);
+	iowrite32(overlay->saturation, &regs->OCLRC1);
 }
 
 static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
 	struct drm_intel_overlay_attrs *attrs = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 	int ret;
 
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
 	overlay = dev_priv->overlay;
 	if (!overlay) {
 		DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
 	struct drm_i915_gem_object *reg_bo;
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 	int ret;
 
 	if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	if (!regs)
 		goto out_unpin_bo;
 
-	memset(regs, 0, sizeof(struct overlay_registers));
+	memset_io(regs, 0, sizeof(struct overlay_registers));
 	update_polyphase_filter(regs);
 	update_reg_attrs(overlay, regs);
 
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
 	u32 isr;
 };
 
-static struct overlay_registers *
+static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
 	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-	struct overlay_registers *regs;
+	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = overlay->reg_bo->phys_obj->handle->vaddr;
+		/* Cast to make sparse happy, but it's wc memory anyway, so
+		 * equivalent to the wc io mapping on X86. */
+		regs = (struct overlay_registers __iomem *)
+			overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 }
 
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-					    struct overlay_registers *regs)
+					struct overlay_registers __iomem *regs)
 {
 	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
-		error->base = (long) overlay->reg_bo->gtt_offset;
+		error->base = overlay->reg_bo->gtt_offset;
 
 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
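
The overlay hunks above (and the RC6 setup further down in intel_pm.c) drop the legacy BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() macros in favour of intel_ring_begin()/intel_ring_emit()/intel_ring_advance() on an explicit ring pointer. A toy userspace model of that reserve-then-emit shape (names, sizes and command dwords are invented for illustration; a real ring wraps, posts the tail to hardware, and can sleep):

	#include <stdint.h>
	#include <stdio.h>

	#define RING_DWORDS 64

	struct toy_ring {
		uint32_t buf[RING_DWORDS];
		unsigned int tail;
		unsigned int reserved;
	};

	static int toy_ring_begin(struct toy_ring *ring, unsigned int dwords)
	{
		if (ring->tail + dwords > RING_DWORDS)
			return -1;		/* real code would wait or wrap */
		ring->reserved = dwords;
		return 0;
	}

	static void toy_ring_emit(struct toy_ring *ring, uint32_t dword)
	{
		ring->buf[ring->tail++] = dword;
		ring->reserved--;
	}

	static void toy_ring_advance(struct toy_ring *ring)
	{
		/* Real code writes the new tail to a hardware register here. */
		printf("tail now %u (%u reserved dwords unused)\n",
		       ring->tail, ring->reserved);
	}

	int main(void)
	{
		struct toy_ring ring = { .tail = 0 };

		if (toy_ring_begin(&ring, 2))
			return 1;
		toy_ring_emit(&ring, 0x02000000);	/* made-up command dword */
		toy_ring_emit(&ring, 0);		/* made-up no-op dword   */
		toy_ring_advance(&ring);
		return 0;
	}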

+ 744 - 23
drivers/gpu/drm/i915/intel_pm.c

@@ -28,6 +28,8 @@
 #include <linux/cpufreq.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
 
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -524,6 +526,113 @@ out_disable:
 	}
 }
 
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	tmp = I915_READ(CLKCFG);
+
+	switch (tmp & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_533:
+		dev_priv->fsb_freq = 533; /* 133*4 */
+		break;
+	case CLKCFG_FSB_800:
+		dev_priv->fsb_freq = 800; /* 200*4 */
+		break;
+	case CLKCFG_FSB_667:
+		dev_priv->fsb_freq =  667; /* 167*4 */
+		break;
+	case CLKCFG_FSB_400:
+		dev_priv->fsb_freq = 400; /* 100*4 */
+		break;
+	}
+
+	switch (tmp & CLKCFG_MEM_MASK) {
+	case CLKCFG_MEM_533:
+		dev_priv->mem_freq = 533;
+		break;
+	case CLKCFG_MEM_667:
+		dev_priv->mem_freq = 667;
+		break;
+	case CLKCFG_MEM_800:
+		dev_priv->mem_freq = 800;
+		break;
+	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->c_m = 1;
+	} else {
+		dev_priv->c_m = 2;
+	}
+}
+
 static const struct cxsr_latency cxsr_latency_table[] = {
 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
@@ -562,7 +671,7 @@ static const struct cxsr_latency cxsr_latency_table[] = {
 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
 							 int is_ddr3,
 							 int fsb,
 							 int mem)
@@ -1694,8 +1803,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		enabled |= 2;
 	}
 
-	/* IVB has 3 pipes */
-	if (IS_IVYBRIDGE(dev) &&
+	if ((dev_priv->num_pipe == 3) &&
 	    g4x_compute_wm0(dev, 2,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
@@ -1775,6 +1883,33 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		   cursor_wm);
 }
 
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 temp;
+
+	temp = I915_READ(PIPE_WM_LINETIME(pipe));
+	temp &= ~PIPE_WM_LINETIME_MASK;
+
+	/* The watermark is computed based on how long it takes to fill a
+	 * single row at the given clock rate, multiplied by 8.
+	 */
+	temp |= PIPE_WM_LINETIME_TIME(
+		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+
+	/* IPS watermarks are only used by pipe A, and are ignored by
+	 * pipes B and C.  They are calculated similarly to the common
+	 * linetime values, except that we are using CD clock frequency
+	 * in MHz instead of pixel rate for the division.
+	 *
+	 * This is a placeholder for the IPS watermark calculation code.
+	 */
+
+	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
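
As a rough sanity check of the arithmetic above, a hypothetical 1920-wide mode with a 148,500 kHz pixel clock (values chosen for illustration, not taken from this patch) gives ((1920 * 1000) / 148500) * 8 = 12 * 8 = 96, so PIPE_WM_LINETIME_TIME() would be programmed with 96; note that the integer division truncates before the multiply by 8.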
+
 static bool
 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
 			      uint32_t sprite_width, int pixel_size,
@@ -1970,6 +2105,15 @@ void intel_update_watermarks(struct drm_device *dev)
 		dev_priv->display.update_wm(dev);
 }
 
+void intel_update_linetime_watermarks(struct drm_device *dev,
+		int pipe, struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.update_linetime_wm)
+		dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 				    uint32_t sprite_width, int pixel_size)
 {
@@ -2182,6 +2326,7 @@ int intel_enable_rc6(const struct drm_device *dev)
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
+	struct intel_ring_buffer *ring;
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	u32 pcu_mbox, rc6_mask = 0;
@@ -2216,8 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -2436,6 +2581,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	/* rc6 disabled by default due to repeated reports of hanging during
@@ -2455,31 +2601,31 @@ void ironlake_enable_rc6(struct drm_device *dev)
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	ret = BEGIN_LP_RING(6);
+	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
 		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
 
-	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
-	OUT_RING(MI_SET_CONTEXT);
-	OUT_RING(dev_priv->renderctx->gtt_offset |
-		 MI_MM_SPACE_GTT |
-		 MI_SAVE_EXT_STATE_EN |
-		 MI_RESTORE_EXT_STATE_EN |
-		 MI_RESTORE_INHIBIT);
-	OUT_RING(MI_SUSPEND_FLUSH);
-	OUT_RING(MI_NOOP);
-	OUT_RING(MI_FLUSH);
-	ADVANCE_LP_RING();
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+			MI_MM_SPACE_GTT |
+			MI_SAVE_EXT_STATE_EN |
+			MI_RESTORE_EXT_STATE_EN |
+			MI_RESTORE_INHIBIT);
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_advance(ring);
 
 	/*
 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
 	 * does an implicit flush, combined with MI_FLUSH above, it should be
 	 * safe to assume that renderctx is valid
 	 */
-	ret = intel_wait_ring_idle(LP_RING(dev_priv));
+	ret = intel_wait_ring_idle(ring);
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power savings\n");
 		ironlake_teardown_rc6(dev);
@@ -2507,6 +2653,485 @@ static unsigned long intel_pxfreq(u32 vidfreq)
 	return freq;
 }
 
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	diff1 = now - dev_priv->last_time1;
+
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->chipset_power;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->last_count1) {
+		diff = ~0UL - dev_priv->last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->c_m &&
+		    cparams[i].t == dev_priv->r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	diff = div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	ret = div_u64(ret, 10);
+
+	dev_priv->last_count1 = total_count;
+	dev_priv->last_time1 = now;
+
+	dev_priv->chipset_power = ret;
+
+	return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	static const struct v_table {
+		u16 vd; /* in .1 mil */
+		u16 vm; /* in .1 mil */
+	} v_table[] = {
+		{ 0, 0, },
+		{ 375, 0, },
+		{ 500, 0, },
+		{ 625, 0, },
+		{ 750, 0, },
+		{ 875, 0, },
+		{ 1000, 0, },
+		{ 1125, 0, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4250, 3125, },
+		{ 4375, 3250, },
+		{ 4500, 3375, },
+		{ 4625, 3500, },
+		{ 4750, 3625, },
+		{ 4875, 3750, },
+		{ 5000, 3875, },
+		{ 5125, 4000, },
+		{ 5250, 4125, },
+		{ 5375, 4250, },
+		{ 5500, 4375, },
+		{ 5625, 4500, },
+		{ 5750, 4625, },
+		{ 5875, 4750, },
+		{ 6000, 4875, },
+		{ 6125, 5000, },
+		{ 6250, 5125, },
+		{ 6375, 5250, },
+		{ 6500, 5375, },
+		{ 6625, 5500, },
+		{ 6750, 5625, },
+		{ 6875, 5750, },
+		{ 7000, 5875, },
+		{ 7125, 6000, },
+		{ 7250, 6125, },
+		{ 7375, 6250, },
+		{ 7500, 6375, },
+		{ 7625, 6500, },
+		{ 7750, 6625, },
+		{ 7875, 6750, },
+		{ 8000, 6875, },
+		{ 8125, 7000, },
+		{ 8250, 7125, },
+		{ 8375, 7250, },
+		{ 8500, 7375, },
+		{ 8625, 7500, },
+		{ 8750, 7625, },
+		{ 8875, 7750, },
+		{ 9000, 7875, },
+		{ 9125, 8000, },
+		{ 9250, 8125, },
+		{ 9375, 8250, },
+		{ 9500, 8375, },
+		{ 9625, 8500, },
+		{ 9750, 8625, },
+		{ 9875, 8750, },
+		{ 10000, 8875, },
+		{ 10125, 9000, },
+		{ 10250, 9125, },
+		{ 10375, 9250, },
+		{ 10500, 9375, },
+		{ 10625, 9500, },
+		{ 10750, 9625, },
+		{ 10875, 9750, },
+		{ 11000, 9875, },
+		{ 11125, 10000, },
+		{ 11250, 10125, },
+		{ 11375, 10250, },
+		{ 11500, 10375, },
+		{ 11625, 10500, },
+		{ 11750, 10625, },
+		{ 11875, 10750, },
+		{ 12000, 10875, },
+		{ 12125, 11000, },
+		{ 12250, 11125, },
+		{ 12375, 11250, },
+		{ 12500, 11375, },
+		{ 12625, 11500, },
+		{ 12750, 11625, },
+		{ 12875, 11750, },
+		{ 13000, 11875, },
+		{ 13125, 12000, },
+		{ 13250, 12125, },
+		{ 13375, 12250, },
+		{ 13500, 12375, },
+		{ 13625, 12500, },
+		{ 13750, 12625, },
+		{ 13875, 12750, },
+		{ 14000, 12875, },
+		{ 14125, 13000, },
+		{ 14250, 13125, },
+		{ 14375, 13250, },
+		{ 14500, 13375, },
+		{ 14625, 13500, },
+		{ 14750, 13625, },
+		{ 14875, 13750, },
+		{ 15000, 13875, },
+		{ 15125, 14000, },
+		{ 15250, 14125, },
+		{ 15375, 14250, },
+		{ 15500, 14375, },
+		{ 15625, 14500, },
+		{ 15750, 14625, },
+		{ 15875, 14750, },
+		{ 16000, 14875, },
+		{ 16125, 15000, },
+	};
+	if (dev_priv->info->is_mobile)
+		return v_table[pxvid].vm;
+	else
+		return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	if (dev_priv->info->gen != 5)
+		return;
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->last_count2) {
+		diff = ~0UL - dev_priv->last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->last_count2;
+	}
+
+	dev_priv->last_count2 = count;
+	dev_priv->last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	diff = div_u64(diff, diffms * 10);
+	dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	i915_update_gfx_val(dev_priv);
+
+	return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = i915_chipset_val(dev_priv);
+	graphics_val = i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay > dev_priv->fmax)
+		dev_priv->max_delay--;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay < dev_priv->min_delay)
+		dev_priv->max_delay++;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = false;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	ret = dev_priv->busy;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->max_delay = dev_priv->fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	dev_priv->mchdev_lock = &mchdev_lock;
+	spin_unlock(&mchdev_lock);
+
+	ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock(&mchdev_lock);
+}
+
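For context, the i915_* hooks exported above are picked up by the separate intel_ips module using the same symbol_get()/symbol_put() dance that ips_ping_for_i915_load() above performs in the other direction. A minimal sketch of that consumer side (the helper names ips_get_i915_symbols()/ips_put_i915_symbols() are illustrative, not part of this patch):

/* Sketch only: bind to the i915 exports without a hard module dependency. */
static unsigned long (*read_mch_val)(void);
static bool (*gpu_busy)(void);

static bool ips_get_i915_symbols(void)
{
	read_mch_val = symbol_get(i915_read_mch_val);
	if (!read_mch_val)
		return false;
	gpu_busy = symbol_get(i915_gpu_busy);
	if (!gpu_busy) {
		symbol_put(i915_read_mch_val);
		read_mch_val = NULL;
		return false;
	}
	return true;
}

static void ips_put_i915_symbols(void)
{
	symbol_put(i915_read_mch_val);
	symbol_put(i915_gpu_busy);
	read_mch_val = NULL;
	gpu_busy = NULL;
}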
 void intel_init_emon(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2663,9 +3288,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	/* clear masked bit */
 	I915_WRITE(CACHE_MODE_0,
-		   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
 	I915_WRITE(GEN6_UCGCTL1,
 		   I915_READ(GEN6_UCGCTL1) |
@@ -2776,6 +3400,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	}
 
 	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	/* WaDisable4x2SubspanOptimization */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 }
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -2821,9 +3449,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 		intel_flush_display_plane(dev_priv, pipe);
 	}
 
-	I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
-		   (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
-		   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
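The conversions to _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() in the clock-gating hunks above rely on the masked-register convention: for registers such as MI_MODE, CACHE_MODE_0/1 and INSTPM, the upper 16 bits of a write act as a per-bit write-enable mask for the lower 16 bits. The helpers from i915_reg.h expand essentially to:

/* Masked registers: the upper half selects which low bits the write touches. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a   */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */

so, for example, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB) has the same effect as the older open-coded CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT form it replaces.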
@@ -2875,6 +3502,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
 		DSTATE_DOT_CLOCK_GATING;
 	I915_WRITE(D_STATE, dstate);
+
+	if (IS_PINEVIEW(dev))
+		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
 }
 
 static void i85x_init_clock_gating(struct drm_device *dev)
@@ -2931,6 +3561,72 @@ void intel_init_clock_gating(struct drm_device *dev)
 		dev_priv->display.init_pch_clock_gating(dev);
 }
 
+static void gen6_sanitize_pm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 limits, delay, old;
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	limits &= ~(0x3f << 16 | 0x3f << 24);
+	delay = dev_priv->cur_delay;
+	if (delay < dev_priv->max_delay)
+		limits |= (dev_priv->max_delay & 0x3f) << 24;
+	if (delay > dev_priv->min_delay)
+		limits |= (dev_priv->min_delay & 0x3f) << 16;
+
+	if (old != limits) {
+		DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
+			  limits, old);
+		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+	}
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
+void intel_sanitize_pm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.sanitize_pm)
+		dev_priv->display.sanitize_pm(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void intel_init_power_wells(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long power_wells[] = {
+		HSW_PWR_WELL_CTL1,
+		HSW_PWR_WELL_CTL2,
+		HSW_PWR_WELL_CTL4
+	};
+	int i;
+
+	if (!IS_HASWELL(dev))
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+		int well = I915_READ(power_wells[i]);
+
+		if ((well & HSW_PWR_WELL_STATE) == 0) {
+			I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
+			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
+				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+		}
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
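The power-well loop above leans on the i915 wait_for(COND, MS) polling helper from intel_drv.h. A stand-alone approximation of its behaviour (not the exact upstream macro) is:

/* Approximation of wait_for(): poll COND roughly once per millisecond and
 * return 0 on success or -ETIMEDOUT once MS milliseconds have elapsed. */
#define wait_for_sketch(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})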
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
@@ -2953,6 +3649,12 @@ void intel_init_pm(struct drm_device *dev)
 		/* 855GM needs testing */
 	}
 
+	/* For cxsr */
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_GEN5(dev))
+		i915_ironlake_get_mem_freq(dev);
+
 	/* For FIFO watermark updates */
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
@@ -3007,6 +3709,7 @@ void intel_init_pm(struct drm_device *dev)
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			if (SNB_READ_WM0_LATENCY()) {
@@ -3018,6 +3721,19 @@ void intel_init_pm(struct drm_device *dev)
 				dev_priv->display.update_wm = NULL;
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+		} else if (IS_HASWELL(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+			dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -3071,5 +3787,10 @@ void intel_init_pm(struct drm_device *dev)
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
+
+	/* We attempt to initialize the necessary power wells early during
+	 * driver initialization, so that subsystems which expect power to
+	 * be enabled can work.
+	 */
+	intel_init_power_wells(dev);
 }
 

+ 102 - 68
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -401,12 +401,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		I915_WRITE(MI_MODE, mode);
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 		if (IS_GEN7(dev))
 			I915_WRITE(GFX_MODE_GEN7,
-				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 	}
 
 	if (INTEL_INFO(dev)->gen >= 5) {
@@ -415,19 +414,19 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		I915_WRITE(INSTPM,
-			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
-
+	if (IS_GEN6(dev)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 *  policy. [...] This bit must be reset.  LRA replacement
 		 *  policy is not supported."
 		 */
 		I915_WRITE(CACHE_MODE_0,
-			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
 	return ret;
 }
 
@@ -621,17 +620,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -641,14 +641,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -656,17 +657,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -676,14 +678,52 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -762,6 +802,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 	       return false;
@@ -771,14 +812,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	 * blt/bsd rings on ivb. */
 	gen6_gt_force_wake_get(dev_priv);
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -788,15 +829,16 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
@@ -858,7 +900,6 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
@@ -869,14 +910,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
-
-	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
 static int init_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
@@ -897,7 +935,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.gfx_addr = obj->gtt_offset;
 	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
 	}
 	ring->status_page.obj = obj;
@@ -930,7 +967,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
-	spin_lock_init(&ring->irq_lock);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
@@ -951,20 +987,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unref;
 
-	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+					 ring->size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->virtual_start = ring->map.handle;
 	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
@@ -980,7 +1010,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 err_unmap:
-	drm_core_ioremapfree(&ring->map, dev);
+	iounmap(ring->virtual_start);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
@@ -1008,7 +1038,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	I915_WRITE_CTL(ring, 0);
 
-	drm_core_ioremapfree(&ring->map, ring->dev);
+	iounmap(ring->virtual_start);
 
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
@@ -1022,7 +1052,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-	unsigned int *virt;
+	uint32_t __iomem *virt;
 	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
@@ -1031,12 +1061,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 8;
-	while (rem--) {
-		*virt++ = MI_NOOP;
-		*virt++ = MI_NOOP;
-	}
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
@@ -1057,9 +1085,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	ret = i915_wait_request(ring, seqno, true);
+	ret = i915_wait_request(ring, seqno);
 
 	dev_priv->mm.interruptible = was_interruptible;
+	if (!ret)
+		i915_gem_retire_requests_ring(ring);
 
 	return ret;
 }
@@ -1133,15 +1163,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		return ret;
 
 	trace_i915_ring_wait_begin(ring);
-	if (drm_core_check_feature(dev, DRIVER_GEM))
-		/* With GEM the hangcheck timer should kick us out of the loop,
-		 * leaving it early runs the risk of corrupting GEM state (due
-		 * to running on almost untested codepaths). But on resume
-		 * timers don't work yet, so prevent a complete hang in that
-		 * case by choosing an insanely large timeout. */
-		end = jiffies + 60 * HZ;
-	else
-		end = jiffies + 3 * HZ;
+	/* With GEM the hangcheck timer should kick us out of the loop,
+	 * leaving it early runs the risk of corrupting GEM state (due
+	 * to running on almost untested codepaths). But on resume
+	 * timers don't work yet, so prevent a complete hang in that
+	 * case by choosing an insanely large timeout. */
+	end = jiffies + 60 * HZ;
 
 	do {
 		ring->head = I915_READ_HEAD(ring);
@@ -1193,7 +1220,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
 	ring->tail &= ring->size - 1;
+	if (dev_priv->stop_rings & intel_ring_flag(ring))
+		return;
 	ring->write_tail(ring, ring->tail);
 }
 
@@ -1318,8 +1349,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		if (IS_GEN2(dev)) {
+			ring->irq_get = i8xx_ring_get_irq;
+			ring->irq_put = i8xx_ring_put_irq;
+		} else {
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
@@ -1366,8 +1402,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
-	ring->irq_get = i9xx_ring_get_irq;
-	ring->irq_put = i9xx_ring_put_irq;
+	if (IS_GEN2(dev)) {
+		ring->irq_get = i8xx_ring_get_irq;
+		ring->irq_put = i8xx_ring_put_irq;
+	} else {
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+	}
 	ring->irq_enable_mask = I915_USER_INTERRUPT;
 	ring->write_tail = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 4)
@@ -1392,20 +1433,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	if (IS_I830(ring->dev))
 		ring->effective_size -= 128;
 
-	ring->map.offset = start;
-	ring->map.size = size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(start, size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
-	ring->virtual_start = (void __force __iomem *)ring->map.handle;
 	return 0;
 }
 

+ 11 - 10
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -2,7 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct  intel_hw_status_page {
-	u32	__iomem	*page_addr;
+	u32		*page_addr;
 	unsigned int	gfx_addr;
 	struct		drm_i915_gem_object *obj;
 };
@@ -56,12 +56,9 @@ struct  intel_ring_buffer {
 	 */
 	u32		last_retired_head;
 
-	spinlock_t	irq_lock;
-	u32		irq_refcount;
+	u32		irq_refcount;		/* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
-	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		trace_irq_seqno;
-	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
@@ -118,11 +115,16 @@ struct  intel_ring_buffer {
 	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
-	drm_local_map_t map;
 
 	void *private;
 };
 
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+	return ring->obj != NULL;
+}
+
 static inline unsigned
 intel_ring_flag(struct intel_ring_buffer *ring)
 {
@@ -152,7 +154,9 @@ static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
 		       int reg)
 {
-	return ioread32(ring->status_page.page_addr + reg);
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
 }
 
 /**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
-#define I915_BREADCRUMB_INDEX		0x21
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 

+ 17 - 13
drivers/gpu/drm/i915/intel_sdvo.c

@@ -747,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 	uint16_t h_sync_offset, v_sync_offset;
 	int mode_clock;
 
-	width = mode->crtc_hdisplay;
-	height = mode->crtc_vdisplay;
+	width = mode->hdisplay;
+	height = mode->vdisplay;
 
 	/* do some mode translations */
-	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
-	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	h_blank_len = mode->htotal - mode->hdisplay;
+	h_sync_len = mode->hsync_end - mode->hsync_start;
 
-	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
-	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	v_blank_len = mode->vtotal - mode->vdisplay;
+	v_sync_len = mode->vsync_end - mode->vsync_start;
 
-	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
-	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+	h_sync_offset = mode->hsync_start - mode->hdisplay;
+	v_sync_offset = mode->vsync_start - mode->vdisplay;
 
 	mode_clock = mode->clock;
 	mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -887,17 +887,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
 	};
 	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
 	uint8_t set_buf_index[2] = { 1, 0 };
-	uint64_t *data = (uint64_t *)&avi_if;
+	uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+	uint64_t *data = (uint64_t *)sdvo_data;
 	unsigned i;
 
 	intel_dip_infoframe_csum(&avi_if);
 
+	/* The SDVO spec says that the ECC is handled by the hardware, and it
+	 * looks like we must not send the ECC field either. */
+	memcpy(sdvo_data, &avi_if, 3);
+	sdvo_data[3] = avi_if.checksum;
+	memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
 	if (!intel_sdvo_set_value(intel_sdvo,
 				  SDVO_CMD_SET_HBUF_INDEX,
 				  set_buf_index, 2))
 		return false;
 
-	for (i = 0; i < sizeof(avi_if); i += 8) {
+	for (i = 0; i < sizeof(sdvo_data); i += 8) {
 		if (!intel_sdvo_set_value(intel_sdvo,
 					  SDVO_CMD_SET_HBUF_DATA,
 					  data, 8))
@@ -1578,9 +1585,6 @@ end:
 			intel_sdvo->sdvo_lvds_fixed_mode =
 				drm_mode_duplicate(connector->dev, newmode);
 
-			drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
-					      0);
-
 			intel_sdvo->is_lvds = true;
 			break;
 		}

+ 17 - 12
drivers/gpu/drm/i915/intel_sprite.c

@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 	 * when scaling is disabled.
 	 */
 	if (crtc_w != src_w || crtc_h != src_h) {
-		dev_priv->sprite_scaling_enabled = true;
-		intel_update_watermarks(dev);
-		intel_wait_for_vblank(dev, pipe);
+		if (!dev_priv->sprite_scaling_enabled) {
+			dev_priv->sprite_scaling_enabled = true;
+			intel_update_watermarks(dev);
+			intel_wait_for_vblank(dev, pipe);
+		}
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 	} else {
-		dev_priv->sprite_scaling_enabled = false;
-		/* potentially re-enable LP watermarks */
-		intel_update_watermarks(dev);
+		if (dev_priv->sprite_scaling_enabled) {
+			dev_priv->sprite_scaling_enabled = false;
+			/* potentially re-enable LP watermarks */
+			intel_update_watermarks(dev);
+		}
 	}
 
 	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -151,6 +155,9 @@ ivb_disable_plane(struct drm_plane *plane)
 	/* Activate double buffered register update */
 	I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
 	POSTING_READ(SPRSURF(pipe));
+
+	dev_priv->sprite_scaling_enabled = false;
+	intel_update_watermarks(dev);
 }
 
 static int
@@ -551,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *set = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
 
-	if (!dev_priv)
-		return -EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
 
 	/* Make sure we don't try to enable both src & dest simultaneously */
 	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -585,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_sprite_colorkey *get = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_object *obj;
 	struct drm_plane *plane;
 	struct intel_plane *intel_plane;
 	int ret = 0;
 
-	if (!dev_priv)
-		return -EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
 
 	mutex_lock(&dev->mode_config.mutex);
 

+ 1 - 4
drivers/gpu/drm/i915/intel_tv.c

@@ -1249,11 +1249,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	int type;
 
 	mode = reported_modes[0];
-	drm_mode_set_crtcinfo(&mode, 0);
 
-	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv, connector);
-	} else if (force) {
+	if (force) {
 		struct intel_load_detect_pipe tmp;
 
 		if (intel_get_load_detect_pipe(&intel_tv->base, connector,

+ 15 - 0
drivers/gpu/drm/mgag200/Kconfig

@@ -0,0 +1,15 @@
+config DRM_MGAG200
+	tristate "Kernel modesetting driver for MGA G200 server engines"
+	depends on DRM && PCI && EXPERIMENTAL
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	  This is a KMS driver for the MGA G200 server chips; it does
+	  not support the original MGA G200 or any of the desktop
+	  chips. It requires version 0.3.0 of the modesetting userspace
+	  driver, and a version of the mga driver that will fail on
+	  KMS-enabled devices.
+

+ 5 - 0
drivers/gpu/drm/mgag200/Makefile

@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y   := mgag200_main.o mgag200_mode.o \
+	mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o

+ 116 - 0
drivers/gpu/drm/mgag200/mgag200_drv.c

@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+#include "drm_pciids.h"
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+	{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+	{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = mgag200_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+	.load = mgag200_driver_load,
+	.unload = mgag200_driver_unload,
+	.fops = &mgag200_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_init_object = mgag200_gem_init_object,
+	.gem_free_object = mgag200_gem_free_object,
+	.dumb_create = mgag200_dumb_create,
+	.dumb_map_offset = mgag200_dumb_mmap_offset,
+	.dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = mga_pci_probe,
+	.remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && mgag200_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (mgag200_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+	drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");

+ 276 - 0
drivers/gpu/drm/mgag200/mgag200_drv.h

@@ -0,0 +1,276 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat 
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * 	    Matt Turner
+ *	    Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include "drm/drm_fb_helper.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR		"Matthew Garrett"
+
+#define DRIVER_NAME		"mgag200"
+#define DRIVER_DESC		"MGA G200 SE"
+#define DRIVER_DATE		"20110418"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
+#define WREG_ATTR(reg, v)					\
+	do {							\
+		RREG8(0x1fda);					\
+		WREG8(ATTR_INDEX, reg);				\
+		WREG8(ATTR_DATA, v);				\
+	} while (0)						\
+
+#define WREG_SEQ(reg, v)					\
+	do {							\
+		WREG8(MGAREG_SEQ_INDEX, reg);			\
+		WREG8(MGAREG_SEQ_DATA, v);			\
+	} while (0)						\
+
+#define WREG_CRT(reg, v)					\
+	do {							\
+		WREG8(MGAREG_CRTC_INDEX, reg);			\
+		WREG8(MGAREG_CRTC_DATA, v);			\
+	} while (0)						\
+
+
+#define WREG_ECRT(reg, v)					\
+	do {							\
+		WREG8(MGAREG_CRTCEXT_INDEX, reg);				\
+		WREG8(MGAREG_CRTCEXT_DATA, v);				\
+	} while (0)						\
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v)					\
+	do {							\
+		WREG8(GFX_INDEX, reg);				\
+		WREG8(GFX_DATA, v);				\
+	} while (0)						\
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v)					\
+	do {							\
+		WREG8(DAC_INDEX, reg);				\
+		WREG8(DAC_DATA, v);				\
+	} while (0)						\
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+	struct drm_fb_helper helper;
+	struct mga_framebuffer mfb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+};
+
+struct mga_crtc {
+	struct drm_crtc base;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	int last_dpms;
+	bool enabled;
+};
+
+struct mga_mode_info {
+	bool mode_config_initialized;
+	struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+	struct drm_encoder base;
+	int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+	int data, clock;
+};
+
+struct mga_connector {
+	struct drm_connector base;
+	struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+	resource_size_t			vram_size;
+	resource_size_t			vram_base;
+	resource_size_t			vram_window;
+};
+
+enum mga_type {
+	G200_SE_A,
+	G200_SE_B,
+	G200_WB,
+	G200_EV,
+	G200_EH,
+	G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+	struct drm_device		*dev;
+	unsigned long			flags;
+
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	void __iomem			*rmmio;
+
+	drm_local_map_t			*framebuffer;
+
+	struct mga_mc			mc;
+	struct mga_mode_info		mode_info;
+
+	struct mga_fbdev *mfbdev;
+
+	bool				suspended;
+	int				num_crtc;
+	enum mga_type			type;
+	int				has_sdram;
+	struct drm_display_mode		mode;
+
+	int bpp_shifts[4];
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+	} ttm;
+
+	u32 reg_1e24; /* SE model number */
+};
+
+
+struct mgag200_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct mgag200_bo, bo);
+}
+				/* mga_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			     u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno);
+
+				/* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+				/* mga_fbdev.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+				/* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+			     struct mga_framebuffer *mfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		       struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+			struct drm_device *dev,
+			struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle,
+			 uint64_t *offset);
+				/* mga_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+		      uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif				/* __MGAG200_DRV_H__ */
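The RREG8/WREG8 and WREG_* helpers above implement the usual index/data register access scheme: a write to the index port selects a register, and the data port then reads or writes it. As a sketch of the pattern (mirroring what mga_i2c_set_gpio() in mgag200_i2c.c does with these helpers; the function name here is illustrative):

/* Sketch: read-modify-write an indexed DAC register with the helpers above. */
static void mga_sketch_gpio_rmw(struct mga_device *mdev, u8 mask, u8 val)
{
	u8 tmp;

	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);	/* select the register    */
	tmp = (RREG8(DAC_DATA) & mask) | val;	/* read the current value */
	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);	/* write the new value    */
}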

+ 294 - 0
drivers/gpu/drm/mgag200/mgag200_fb.c

@@ -0,0 +1,294 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Matt Turner
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct mgag200_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+
+	obj = mfbdev->mfb.obj;
+	bo = gem_to_mga_bo(obj);
+
+	ret = mgag200_bo_reserve(bo, true);
+	if (ret) {
+		DRM_ERROR("failed to reserve fb bo\n");
+		return;
+	}
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			mgag200_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i < y + height; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_fillrect(info, rect);
+	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_copyarea(info, area);
+	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_imageblit(info, image);
+	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = mga_fillrect,
+	.fb_copyarea = mga_copyarea,
+	.fb_imageblit = mga_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+				   struct drm_mode_fb_cmd2 *mode_cmd,
+				   struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = mgag200_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int mgag200fb_create(struct mga_fbdev *mfbdev,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = mfbdev->helper.dev;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct mga_device *mdev = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *gobj = NULL;
+	struct device *device = &dev->pdev->dev;
+	struct mgag200_bo *bo;
+	int ret;
+	void *sysram;
+	int size;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+	bo = gem_to_mga_bo(gobj);
+
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL)
+		return -ENOMEM;
+
+	info->par = mfbdev;
+
+	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	mfbdev->sysram = sysram;
+	mfbdev->size = size;
+
+	fb = &mfbdev->mfb.base;
+
+	/* setup helper */
+	mfbdev->helper.fb = fb;
+	mfbdev->helper.fbdev = info;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	strcpy(info->fix.id, "mgadrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &mgag200fb_ops;
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n",
+		      fb->width, fb->height);
+	return 0;
+out:
+	return ret;
+}
+
+static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
+					   struct drm_fb_helper_surface_size
+					   *sizes)
+{
+	struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = mgag200fb_create(mfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+				struct mga_fbdev *mfbdev)
+{
+	struct fb_info *info;
+	struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+	if (mfbdev->helper.fbdev) {
+		info = mfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (mfb->obj) {
+		drm_gem_object_unreference_unlocked(mfb->obj);
+		mfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&mfbdev->helper);
+	vfree(mfbdev->sysram);
+	drm_framebuffer_cleanup(&mfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+	.gamma_set = mga_crtc_fb_gamma_set,
+	.gamma_get = mga_crtc_fb_gamma_get,
+	.fb_probe = mga_fb_find_or_create_single,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+	struct mga_fbdev *mfbdev;
+	int ret;
+
+	mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+	if (!mfbdev)
+		return -ENOMEM;
+
+	mdev->mfbdev = mfbdev;
+	mfbdev->helper.funcs = &mga_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+	if (ret) {
+		kfree(mfbdev);
+		return ret;
+	}
+	drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+	drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+	return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+	if (!mdev->mfbdev)
+		return;
+
+	mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+	kfree(mdev->mfbdev);
+	mdev->mfbdev = NULL;
+}

+ 156 - 0
drivers/gpu/drm/mgag200/mgag200_i2c.c

@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+	int tmp;
+
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+	tmp = (RREG8(DAC_DATA) & mask) | val;
+	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+	WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+	if (state)
+		state = 0;
+	else
+		state = mask;
+	mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+	struct mga_device *mdev = dev->dev_private;
+	struct mga_i2c_chan *i2c;
+	int ret;
+	int data, clock;
+
+	WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+	WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+	case G200_EV:
+	case G200_WB:
+		data = 1;
+		clock = 2;
+		break;
+	case G200_EH:
+	case G200_ER:
+		data = 2;
+		clock = 1;
+		break;
+	default:
+		data = 2;
+		clock = 8;
+		break;
+	}
+
+	i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+
+	i2c->data = data;
+	i2c->clock = clock;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+	i2c->adapter.algo_data = &i2c->bit;
+
+	i2c->bit.udelay = 10;
+	i2c->bit.timeout = 2;
+	i2c->bit.data = i2c;
+	i2c->bit.setsda		= mga_gpio_setsda;
+	i2c->bit.setscl		= mga_gpio_setscl;
+	i2c->bit.getsda		= mga_gpio_getsda;
+	i2c->bit.getscl		= mga_gpio_getscl;
+
+	ret = i2c_bit_add_bus(&i2c->adapter);
+	if (ret) {
+		kfree(i2c);
+		i2c = NULL;
+	}
+	return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
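The adapter created above is normally handed to the DRM EDID helpers from the connector code in mgag200_mode.c. A hedged sketch of such a ->get_modes() hook (function name illustrative; only helpers that already exist in this tree are used):

/* Sketch: fetch EDID over the bit-banged bus and register the modes. */
static int mga_sketch_get_modes(struct drm_connector *connector)
{
	struct mga_connector *mga_connector = to_mga_connector(connector);
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}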
+

+ 388 - 0
drivers/gpu/drm/mgag200/mgag200_main.c

@@ -0,0 +1,388 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Matt Turner
+ *          Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+	if (mga_fb->obj)
+		drm_gem_object_unreference_unlocked(mga_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						 struct drm_file *file_priv,
+						 unsigned int *handle)
+{
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+	.destroy = mga_user_framebuffer_destroy,
+	.create_handle = mga_user_framebuffer_create_handle,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+			     struct mga_framebuffer *gfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj)
+{
+	int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+	gfb->obj = obj;
+	return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+				struct drm_file *filp,
+				struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct mga_framebuffer *mga_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+	if (!mga_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(mga_fb);
+		return ERR_PTR(ret);
+	}
+	return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+	.fb_create = mgag200_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void mga_vram_fini(struct mga_device *mdev)
+{
+	pci_iounmap(mdev->dev->pdev, mdev->rmmio);
+	mdev->rmmio = NULL;
+	if (mdev->mc.vram_base)
+		release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
+}
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+	int offset;
+	int orig;
+	int test1, test2;
+	int orig1, orig2;
+
+	/* Probe */
+	orig = ioread16(mem);
+	iowrite16(0, mem);
+
+	for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+		orig1 = ioread8(mem + offset);
+		orig2 = ioread8(mem + offset + 0x100);
+
+		iowrite16(0xaa55, mem + offset);
+		iowrite16(0xaa55, mem + offset + 0x100);
+
+		test1 = ioread16(mem + offset);
+		test2 = ioread16(mem);
+
+		iowrite16(orig1, mem + offset);
+		iowrite16(orig2, mem + offset + 0x100);
+
+		if (test1 != 0xaa55) {
+			break;
+		}
+
+		if (test2) {
+			break;
+		}
+	}
+
+	iowrite16(orig, mem);
+	return offset - 65536;
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+	void __iomem *mem;
+	struct apertures_struct *aper = alloc_apertures(1);
+
+	/* BAR 0 is VRAM */
+	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+	aper->ranges[0].base = mdev->mc.vram_base;
+	aper->ranges[0].size = mdev->mc.vram_window;
+	aper->count = 1;
+
+	remove_conflicting_framebuffers(aper, "mgafb", true);
+
+	if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+				"mgadrmfb_vram")) {
+		DRM_ERROR("can't reserve VRAM\n");
+		return -ENXIO;
+	}
+
+	mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+	mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+	pci_iounmap(mdev->dev->pdev, mem);
+
+	return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+			       uint32_t flags)
+{
+	struct mga_device *mdev = dev->dev_private;
+	int ret, option;
+
+	mdev->type = flags;
+
+	/* Hardcode the number of CRTCs to 1 */
+	mdev->num_crtc = 1;
+
+	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+	mdev->has_sdram = !(option & (1 << 14));
+
+	/* BAR 0 is the framebuffer, BAR 1 contains registers */
+	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+	if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+				"mgadrmfb_mmio")) {
+		DRM_ERROR("can't reserve mmio registers\n");
+		return -ENOMEM;
+	}
+
+	mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+	if (mdev->rmmio == NULL)
+		return -ENOMEM;
+
+	/* stash G200 SE model number for later use */
+	if (IS_G200_SE(mdev))
+		mdev->reg_1e24 = RREG32(0x1e24);
+
+	ret = mga_vram_init(mdev);
+	if (ret) {
+		release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+		return ret;
+	}
+
+	mdev->bpp_shifts[0] = 0;
+	mdev->bpp_shifts[1] = 1;
+	mdev->bpp_shifts[2] = 0;
+	mdev->bpp_shifts[3] = 2;
+	return 0;
+}
+
+void mgag200_device_fini(struct mga_device *mdev)
+{
+	release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+	mga_vram_fini(mdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct mga_device *mdev;
+	int r;
+
+	mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+	if (mdev == NULL)
+		return -ENOMEM;
+	dev->dev_private = (void *)mdev;
+	mdev->dev = dev;
+
+	r = mgag200_device_init(dev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+		goto out;
+	}
+	r = mgag200_mm_init(mdev);
+	if (r)
+		goto out;
+
+	drm_mode_config_init(dev);
+	dev->mode_config.funcs = (void *)&mga_mode_funcs;
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	r = mgag200_modeset_init(mdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+	if (r)
+		mgag200_driver_unload(dev);
+	return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+	struct mga_device *mdev = dev->dev_private;
+
+	if (mdev == NULL)
+		return 0;
+	mgag200_modeset_fini(mdev);
+	mgag200_fbdev_fini(mdev);
+	drm_mode_config_cleanup(dev);
+	mgag200_mm_fini(mdev);
+	mgag200_device_fini(mdev);
+	kfree(mdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct mgag200_bo *astbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &astbo->gem;
+	return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = mgag200_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int mgag200_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+	struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+	if (!mgag200_bo)
+		return;
+	mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct mgag200_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_mga_bo(obj);
+	*offset = mgag200_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}

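The GEM helpers above (mgag200_gem_create(), mgag200_dumb_create(), mgag200_dumb_mmap_offset()) service the generic KMS dumb-buffer ioctls. The following minimal userspace sketch is not part of this patch; it assumes libdrm and a /dev/dri/card0 node bound to this driver, and shows how a client typically exercises that path, with error handling reduced to bail-outs.

/* build roughly as: gcc dumb.c $(pkg-config --cflags --libs libdrm) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>		/* drmIoctl(), DRM_IOCTL_MODE_* */

int main(void)
{
	struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	void *fb;
	int fd = open("/dev/dri/card0", O_RDWR);	/* device node path is an assumption */

	if (fd < 0)
		return 1;

	/* the driver fills in pitch (= width * bytes per pixel) and size (= pitch * height) */
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* ask for the fake mmap offset backed by the TTM object's address space */
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.offset);
	if (fb == MAP_FAILED)
		return 1;

	memset(fb, 0, create.size);	/* clear the buffer */
	printf("handle=%u pitch=%u size=%llu\n", create.handle, create.pitch,
	       (unsigned long long)create.size);
	return 0;
}

Depending on the kernel's ioctl permissions, this may need to run as the DRM master, for example from a bare VT with no display server active.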
+ 1533 - 0
drivers/gpu/drm/mgag200/mgag200_mode.c

@@ -0,0 +1,1533 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *	    Matt Turner
+ *	    Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+	for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+		/* VGA registers */
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+	}
+}
+
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+	unsigned int count = 0;
+	unsigned int status = 0;
+
+	do {
+		status = RREG32(MGAREG_Status);
+		count++;
+	} while ((status & 0x08) && (count < 250000));
+	count = 0;
+	status = 0;
+	do {
+		status = RREG32(MGAREG_Status);
+		count++;
+	} while (!(status & 0x08) && (count < 250000));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+	unsigned int count = 0;
+	unsigned int status = 0;
+	do {
+		status = RREG8(MGAREG_Status + 2);
+		count++;
+	} while ((status & 0x01) && (count < 500000));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+
+	m = n = p = 0;
+	vcomax = 320000;
+	vcomin = 160000;
+	pllreffreq = 25000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 8; testp > 0; testp /= 2) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testn = 17; testn < 256; testn++) {
+			for (testm = 1; testm < 32; testm++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					m = testm - 1;
+					n = testn - 1;
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	if (delta > permitteddelta) {
+		printk(KERN_WARNING "PLL delta too large\n");
+		return 1;
+	}
+
+	WREG_DAC(MGA1064_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_PIX_PLLC_P, p);
+	return 0;
+}
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	int i, j, tmpcount, vcount;
+	bool pll_locked = false;
+	u8 tmp;
+
+	m = n = p = 0;
+	vcomax = 550000;
+	vcomin = 150000;
+	pllreffreq = 48000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 1; testp < 9; testp++) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testm = 1; testm < 17; testm++) {
+			for (testn = 1; testn < 151; testn++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = (testm - 1) | ((n >> 1) & 0x80);
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	for (i = 0; i <= 32 && pll_locked == false; i++) {
+		if (i > 0) {
+			WREG8(MGAREG_CRTC_INDEX, 0x1e);
+			tmp = RREG8(MGAREG_CRTC_DATA);
+			if (tmp < 0xff)
+				WREG8(MGAREG_CRTC_DATA, tmp+1);
+		}
+
+		/* set pixclkdis to 1 */
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_REMHEADCTL_CLKDIS;
+		WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+		/* select PLL Set C */
+		tmp = RREG8(MGAREG_MEM_MISC_READ);
+		tmp |= 0x3 << 2;
+		WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		udelay(500);
+
+		/* reset the PLL */
+		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~0x04;
+		WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+		udelay(50);
+
+		/* program pixel pll register */
+		WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+		WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+		WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+		udelay(50);
+
+		/* turn pll on */
+		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= 0x04;
+		WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+		udelay(500);
+
+		/* select the pixel pll */
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+		tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+		WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+		/* reset dotclock rate bit */
+		WREG8(MGAREG_SEQ_INDEX, 1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+		tmp &= ~0x8;
+		WREG8(MGAREG_SEQ_DATA, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		vcount = RREG8(MGAREG_VCOUNT);
+
+		for (j = 0; j < 30 && pll_locked == false; j++) {
+			tmpcount = RREG8(MGAREG_VCOUNT);
+			if (tmpcount < vcount)
+				vcount = 0;
+			if ((tmpcount - vcount) > 2)
+				pll_locked = true;
+			else
+				udelay(5);
+		}
+	}
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	u8 tmp;
+
+	m = n = p = 0;
+	vcomax = 550000;
+	vcomin = 150000;
+	pllreffreq = 50000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 16; testp > 0; testp--) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testn = 1; testn < 257; testn++) {
+			for (testm = 1; testm < 17; testm++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = testm - 1;
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= 0x3 << 2;
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+	tmp = RREG8(DAC_DATA);
+	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+	udelay(50);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	udelay(500);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+	tmp = RREG8(DAC_DATA);
+	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= (0x3 << 2);
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	int i, j, tmpcount, vcount;
+	u8 tmp;
+	bool pll_locked = false;
+
+	m = n = p = 0;
+	vcomax = 800000;
+	vcomin = 400000;
+	pllreffreq = 3333;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 16; testp > 0; testp--) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testm = 1; testm < 33; testm++) {
+			for (testn = 1; testn < 257; testn++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = (testm - 1) | ((n >> 1) & 0x80);
+					p = testp - 1;
+				}
+				if ((clock * testp) >= 600000)
+					p |= 0x80;
+			}
+		}
+	}
+	for (i = 0; i <= 32 && pll_locked == false; i++) {
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		tmp = RREG8(MGAREG_MEM_MISC_READ);
+		tmp |= 0x3 << 2;
+		WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		udelay(500);
+
+		WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+		WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+		WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+		udelay(500);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+		WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+		vcount = RREG8(MGAREG_VCOUNT);
+
+		for (j = 0; j < 30 && pll_locked == false; j++) {
+			tmpcount = RREG8(MGAREG_VCOUNT);
+			if (tmpcount < vcount)
+				vcount = 0;
+			if ((tmpcount - vcount) > 2)
+				pll_locked = true;
+			else
+				udelay(5);
+		}
+	}
+
+	return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta;
+	unsigned int testr, testn, testm, testo;
+	unsigned int p, m, n;
+	unsigned int computed;
+	int tmp;
+
+	m = n = p = 0;
+	vcomax = 1488000;
+	vcomin = 1056000;
+	pllreffreq = 48000;
+
+	delta = 0xffffffff;
+
+	for (testr = 0; testr < 4; testr++) {
+		if (delta == 0)
+			break;
+		for (testn = 5; testn < 129; testn++) {
+			if (delta == 0)
+				break;
+			for (testm = 3; testm >= 0; testm--) {
+				if (delta == 0)
+					break;
+				for (testo = 5; testo < 33; testo++) {
+					computed = pllreffreq * (testn + 1) /
+						(testr + 1);
+					if (computed < vcomin)
+						continue;
+					if (computed > vcomax)
+						continue;
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm | (testo << 3);
+						n = testn;
+						p = testr | (testr << 3);
+					}
+				}
+			}
+		}
+	}
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_REMHEADCTL_CLKDIS;
+	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= (0x3<<2) | 0xc0;
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+	udelay(500);
+
+	WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+	udelay(50);
+
+	return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		return mga_g200se_set_plls(mdev, clock);
+	case G200_WB:
+		return mga_g200wb_set_plls(mdev, clock);
+	case G200_EV:
+		return mga_g200ev_set_plls(mdev, clock);
+	case G200_EH:
+		return mga_g200eh_set_plls(mdev, clock);
+	case G200_ER:
+		return mga_g200er_set_plls(mdev, clock);
+	}
+	return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	u8 tmp;
+	int iter_max;
+
+	/* 1- The first step is to warn the BMC of an upcoming mode change.
+	 * We do this by switching the misc<0> line to an output. */
+
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x10;
+	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+	/* we are putting a 1 on the misc<0> line */
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x10;
+	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+	/* 2- The second step is to mask any further scan request.
+	 * This is done by asserting the remfreqmsk bit (XSPAREREG<7>)
+	 */
+	WREG8(DAC_INDEX, MGA1064_SPAREREG);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x80;
+	WREG_DAC(MGA1064_SPAREREG, tmp);
+
+	/* 3a- The third step is to verify whether there is an active scan.
+	 * We are looking for a 0 on remhsyncsts (XSPAREREG<0>)
+	 */
+	iter_max = 300;
+	while (!(tmp & 0x1) && iter_max) {
+		WREG8(DAC_INDEX, MGA1064_SPAREREG);
+		tmp = RREG8(DAC_DATA);
+		udelay(1000);
+		iter_max--;
+	}
+
+	/* 3b- This step occurs only if the remote is actually scanning.
+	 * We are waiting for the end of the frame, which is a 1 on
+	 * remvsyncsts (XSPAREREG<1>)
+	 */
+	if (iter_max) {
+		iter_max = 300;
+		while ((tmp & 0x2) && iter_max) {
+			WREG8(DAC_INDEX, MGA1064_SPAREREG);
+			tmp = RREG8(DAC_DATA);
+			udelay(1000);
+			iter_max--;
+		}
+	}
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+	u8 tmp;
+	struct mga_device *mdev = crtc->dev->dev_private;
+
+	/* 1- The first step is to ensure that the vrsten and hrsten are set */
+	WREG8(MGAREG_CRTCEXT_INDEX, 1);
+	tmp = RREG8(MGAREG_CRTCEXT_DATA);
+	WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+	/* 2- second step is to assert the rstlvl2 */
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x8;
+	WREG8(DAC_DATA, tmp);
+
+	/* wait 10 us */
+	udelay(10);
+
+	/* 3- deassert rstlvl2 */
+	tmp &= ~0x08;
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+	WREG8(DAC_DATA, tmp);
+
+	/* 4- remove mask of scan request */
+	WREG8(DAC_INDEX, MGA1064_SPAREREG);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~0x80;
+	WREG8(DAC_DATA, tmp);
+
+	/* 5- put back a 0 on the misc<0> line */
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~0x10;
+	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	u32 addr;
+	int count;
+
+	while (RREG8(0x1fda) & 0x08);
+	while (!(RREG8(0x1fda) & 0x08));
+
+	count = RREG8(MGAREG_VCOUNT) + 2;
+	while (RREG8(MGAREG_VCOUNT) < count);
+
+	addr = offset >> 2;
+	WREG_CRT(0x0d, (u8)(addr & 0xff));
+	WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+	WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+}
+
+
+/* unlike most drivers, we force-move the previous framebuffer out of VRAM before pinning the new one */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct mga_framebuffer *mga_fb;
+	struct mgag200_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		mga_fb = to_mga_framebuffer(fb);
+		obj = mga_fb->obj;
+		bo = gem_to_mga_bo(obj);
+		ret = mgag200_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		mgag200_bo_push_sysram(bo);
+		mgag200_bo_unreserve(bo);
+	}
+
+	mga_fb = to_mga_framebuffer(crtc->fb);
+	obj = mga_fb->obj;
+	bo = gem_to_mga_bo(obj);
+
+	ret = mgag200_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		mgag200_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&mdev->mfbdev->mfb == mga_fb) {
+		/* if we are setting the console framebuffer, kmap it for fbcon */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+
+	}
+	mgag200_bo_unreserve(bo);
+
+	DRM_INFO("mga base %llx\n", gpu_addr);
+
+	mga_set_start_address(crtc, (u32)gpu_addr);
+
+	return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	int hdisplay, hsyncstart, hsyncend, htotal;
+	int vdisplay, vsyncstart, vsyncend, vtotal;
+	int pitch;
+	int option = 0, option2 = 0;
+	int i;
+	unsigned char misc = 0;
+	unsigned char ext_vga[6];
+	unsigned char ext_vga_index24;
+	unsigned char dac_index90 = 0;
+	u8 bppshift;
+
+	static unsigned char dacvalue[] = {
+		/* 0x00: */        0,    0,    0,    0,    0,    0, 0x00,    0,
+		/* 0x08: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x10: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x18: */     0x00,    0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+		/* 0x20: */     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* 0x28: */     0x00, 0x00, 0x00, 0x00,    0,    0,    0, 0x40,
+		/* 0x30: */     0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+		/* 0x38: */     0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+		/* 0x40: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x48: */        0,    0,    0,    0,    0,    0,    0,    0
+	};
+
+	bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		dacvalue[MGA1064_VREF_CTL] = 0x03;
+		dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+					     MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		if (mdev->has_sdram)
+			option = 0x40049120;
+		else
+			option = 0x4004d120;
+		option2 = 0x00008000;
+		break;
+	case G200_WB:
+		dacvalue[MGA1064_VREF_CTL] = 0x07;
+		option = 0x41049120;
+		option2 = 0x0000b000;
+		break;
+	case G200_EV:
+		dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		option = 0x00000120;
+		option2 = 0x0000b000;
+		break;
+	case G200_EH:
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		option = 0x00000120;
+		option2 = 0x0000b000;
+		break;
+	case G200_ER:
+		dac_index90 = 0;
+		break;
+	}
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+		else
+			dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+		break;
+	case 24:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+		break;
+	case 32:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+		break;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= 0x40;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= 0x80;
+
+
+	for (i = 0; i < sizeof(dacvalue); i++) {
+		if ((i <= 0x03) ||
+		    (i == 0x07) ||
+		    (i == 0x0b) ||
+		    (i == 0x0f) ||
+		    ((i >= 0x13) && (i <= 0x17)) ||
+		    (i == 0x1b) ||
+		    (i == 0x1c) ||
+		    ((i >= 0x1f) && (i <= 0x29)) ||
+		    ((i >= 0x30) && (i <= 0x37)))
+			continue;
+		if (IS_G200_SE(mdev) &&
+		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+			continue;
+		if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+		    (i >= 0x44) && (i <= 0x4e))
+			continue;
+
+		WREG_DAC(i, dacvalue[i]);
+	}
+
+	if (mdev->type == G200_ER) {
+		WREG_DAC(0x90, dac_index90);
+	}
+
+
+	if (option)
+		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+	if (option2)
+		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+	WREG_SEQ(2, 0xf);
+	WREG_SEQ(3, 0);
+	WREG_SEQ(4, 0xe);
+
+	pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+	if (crtc->fb->bits_per_pixel == 24)
+		pitch = pitch >> (4 - bppshift);
+	else
+		pitch = pitch >> (4 - bppshift);
+
+	hdisplay = mode->hdisplay / 8 - 1;
+	hsyncstart = mode->hsync_start / 8 - 1;
+	hsyncend = mode->hsync_end / 8 - 1;
+	htotal = mode->htotal / 8 - 1;
+
+	/* Work around hardware quirk */
+	if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+		htotal++;
+
+	vdisplay = mode->vdisplay - 1;
+	vsyncstart = mode->vsync_start - 1;
+	vsyncend = mode->vsync_end - 1;
+	vtotal = mode->vtotal - 2;
+
+	WREG_GFX(0, 0);
+	WREG_GFX(1, 0);
+	WREG_GFX(2, 0);
+	WREG_GFX(3, 0);
+	WREG_GFX(4, 0);
+	WREG_GFX(5, 0x40);
+	WREG_GFX(6, 0x5);
+	WREG_GFX(7, 0xf);
+	WREG_GFX(8, 0xf);
+
+	WREG_CRT(0, htotal - 4);
+	WREG_CRT(1, hdisplay);
+	WREG_CRT(2, hdisplay);
+	WREG_CRT(3, (htotal & 0x1F) | 0x80);
+	WREG_CRT(4, hsyncstart);
+	WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+	WREG_CRT(6, vtotal & 0xFF);
+	WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+		 ((vdisplay & 0x100) >> 7) |
+		 ((vsyncstart & 0x100) >> 6) |
+		 ((vdisplay & 0x100) >> 5) |
+		 ((vdisplay & 0x100) >> 4) | /* linecomp */
+		 ((vtotal & 0x200) >> 4)|
+		 ((vdisplay & 0x200) >> 3) |
+		 ((vsyncstart & 0x200) >> 2));
+	WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+		 ((vdisplay & 0x200) >> 3));
+	WREG_CRT(10, 0);
+	WREG_CRT(11, 0);
+	WREG_CRT(12, 0);
+	WREG_CRT(13, 0);
+	WREG_CRT(14, 0);
+	WREG_CRT(15, 0);
+	WREG_CRT(16, vsyncstart & 0xFF);
+	WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+	WREG_CRT(18, vdisplay & 0xFF);
+	WREG_CRT(19, pitch & 0xFF);
+	WREG_CRT(20, 0);
+	WREG_CRT(21, vdisplay & 0xFF);
+	WREG_CRT(22, (vtotal + 1) & 0xFF);
+	WREG_CRT(23, 0xc3);
+	WREG_CRT(24, vdisplay & 0xFF);
+
+	ext_vga[0] = 0;
+	ext_vga[5] = 0;
+
+	/* TODO interlace */
+
+	ext_vga[0] |= (pitch & 0x300) >> 4;
+	ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+		((hdisplay & 0x100) >> 7) |
+		((hsyncstart & 0x100) >> 6) |
+		(htotal & 0x40);
+	ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+		((vdisplay & 0x400) >> 8) |
+		((vdisplay & 0xc00) >> 7) |
+		((vsyncstart & 0xc00) >> 5) |
+		((vdisplay & 0x400) >> 3);
+	if (crtc->fb->bits_per_pixel == 24)
+		ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+	else
+		ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+	ext_vga[4] = 0;
+	if (mdev->type == G200_WB)
+		ext_vga[1] |= 0x88;
+
+	ext_vga_index24 = 0x05;
+
+	/* Set pixel clocks */
+	misc = 0x2d;
+	WREG8(MGA_MISC_OUT, misc);
+
+	mga_crtc_set_plls(mdev, mode->clock);
+
+	for (i = 0; i < 6; i++) {
+		WREG_ECRT(i, ext_vga[i]);
+	}
+
+	if (mdev->type == G200_ER)
+		WREG_ECRT(24, ext_vga_index24);
+
+	if (mdev->type == G200_EV) {
+		WREG_ECRT(6, 0);
+	}
+
+	WREG_ECRT(0, ext_vga[0]);
+	/* Enable mga pixel clock */
+	misc = 0x2d;
+
+	WREG8(MGA_MISC_OUT, misc);
+
+	if (adjusted_mode)
+		memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+	mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+	/* reset tagfifo */
+	if (mdev->type == G200_ER) {
+		u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+		u8 seq1;
+
+		/* screen off */
+		WREG8(MGAREG_SEQ_INDEX, 0x01);
+		seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+		WREG8(MGAREG_SEQ_DATA, seq1);
+
+		WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+		udelay(1000);
+		WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+		WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+	}
+
+
+	if (IS_G200_SE(mdev)) {
+		if (mdev->reg_1e24 >= 0x02) {
+			u8 hi_pri_lvl;
+			u32 bpp;
+			u32 mb;
+
+			if (crtc->fb->bits_per_pixel > 16)
+				bpp = 32;
+			else if (crtc->fb->bits_per_pixel > 8)
+				bpp = 16;
+			else
+				bpp = 8;
+
+			mb = (mode->clock * bpp) / 1000;
+			if (mb > 3100)
+				hi_pri_lvl = 0;
+			else if (mb > 2600)
+				hi_pri_lvl = 1;
+			else if (mb > 1900)
+				hi_pri_lvl = 2;
+			else if (mb > 1160)
+				hi_pri_lvl = 3;
+			else if (mb > 440)
+				hi_pri_lvl = 4;
+			else
+				hi_pri_lvl = 5;
+
+			WREG8(0x1fde, 0x06);
+			WREG8(0x1fdf, hi_pri_lvl);
+		} else {
+			if (mdev->reg_1e24 >= 0x01)
+				WREG8(0x1fdf, 0x03);
+			else
+				WREG8(0x1fdf, 0x04);
+		}
+	}
+	return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	u32 option;
+
+	if (mdev->suspended)
+		return 0;
+
+	WREG_SEQ(1, 0x20);
+	WREG_ECRT(1, 0x30);
+	/* Disable the pixel clock */
+	WREG_DAC(0x1a, 0x05);
+	/* Power down the DAC */
+	WREG_DAC(0x1e, 0x18);
+	/* Power down the pixel PLL */
+	WREG_DAC(0x1a, 0x0d);
+
+	/* Disable PLLs and clocks */
+	pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+	option &= ~(0x1F8024);
+	pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+	pci_set_power_state(pdev, PCI_D3hot);
+	pci_disable_device(pdev);
+
+	mdev->suspended = true;
+
+	return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	u32 option;
+
+	if (!mdev->suspended)
+		return 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_device(pdev);
+
+	/* Disable sysclk */
+	pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+	option &= ~(0x4);
+	pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+	mdev->suspended = false;
+
+	return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	u8 seq1 = 0, crtcext1 = 0;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		seq1 = 0;
+		crtcext1 = 0;
+		mga_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		seq1 = 0x20;
+		crtcext1 = 0x10;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		seq1 = 0x20;
+		crtcext1 = 0x20;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		seq1 = 0x20;
+		crtcext1 = 0x30;
+		break;
+	}
+
+#if 0
+	if (mode == DRM_MODE_DPMS_OFF) {
+		mga_suspend(crtc);
+	}
+#endif
+	WREG8(MGAREG_SEQ_INDEX, 0x01);
+	seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+	mga_wait_vsync(mdev);
+	mga_wait_busy(mdev);
+	WREG8(MGAREG_SEQ_DATA, seq1);
+	msleep(20);
+	WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+	crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+	WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+	if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+		mga_resume(crtc);
+		drm_helper_resume_force_mode(dev);
+	}
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use is to blank the
+ * display while it is being reprogrammed so that intermediate stages are not
+ * visible; we also put the sequencer into sync reset here
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	u8 tmp;
+
+	/*	mga_resume(crtc);*/
+
+	WREG8(MGAREG_CRTC_INDEX, 0x11);
+	tmp = RREG8(MGAREG_CRTC_DATA);
+	WREG_CRT(0x11, tmp | 0x80);
+
+	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+		WREG_SEQ(0, 1);
+		msleep(50);
+		WREG_SEQ(1, 0x20);
+		msleep(20);
+	} else {
+		WREG8(MGAREG_SEQ_INDEX, 0x1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+
+		/* start sync reset */
+		WREG_SEQ(0, 1);
+		WREG_SEQ(1, tmp | 0x20);
+	}
+
+	if (mdev->type == G200_WB)
+		mga_g200wb_prepare(crtc);
+
+	WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	u8 tmp;
+
+	if (mdev->type == G200_WB)
+		mga_g200wb_commit(crtc);
+
+	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+		msleep(50);
+		WREG_SEQ(1, 0x0);
+		msleep(20);
+		WREG_SEQ(0, 0x3);
+	} else {
+		WREG8(MGAREG_SEQ_INDEX, 0x1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+
+		tmp &= ~0x20;
+		WREG_SEQ(0x1, tmp);
+		WREG_SEQ(0, 3);
+	}
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so we can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+	int i;
+
+	for (i = start; i < end; i++) {
+		mga_crtc->lut_r[i] = red[i] >> 8;
+		mga_crtc->lut_g[i] = green[i] >> 8;
+		mga_crtc->lut_b[i] = blue[i] >> 8;
+	}
+	mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+	.gamma_set = mga_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+	.dpms = mga_crtc_dpms,
+	.mode_fixup = mga_crtc_mode_fixup,
+	.mode_set = mga_crtc_mode_set,
+	.mode_set_base = mga_crtc_mode_set_base,
+	.prepare = mga_crtc_prepare,
+	.commit = mga_crtc_commit,
+	.load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct drm_device *dev)
+{
+	struct mga_device *mdev = dev->dev_private;
+	struct mga_crtc *mga_crtc;
+	int i;
+
+	mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+			      (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+			      GFP_KERNEL);
+
+	if (mga_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+	mdev->mode_info.crtc = mga_crtc;
+
+	for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+		mga_crtc->lut_r[i] = i;
+		mga_crtc->lut_g[i] = i;
+		mga_crtc->lut_b[i] = i;
+	}
+
+	drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	mga_crtc->lut_r[regno] = red >> 8;
+	mga_crtc->lut_g[regno] = green >> 8;
+	mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	*red = (u16)mga_crtc->lut_r[regno] << 8;
+	*green = (u16)mga_crtc->lut_g[regno] << 8;
+	*blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the G200 drives a plain analog DAC, so
+ * the encoder callbacks below are empty stubs
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+	return;
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+	.dpms = mga_encoder_dpms,
+	.mode_fixup = mga_encoder_mode_fixup,
+	.mode_set = mga_encoder_mode_set,
+	.prepare = mga_encoder_prepare,
+	.commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+	.destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct mga_encoder *mga_encoder;
+
+	mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+	if (!mga_encoder)
+		return NULL;
+
+	encoder = &mga_encoder->base;
+	encoder->possible_crtcs = 0x1;
+
+	drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+	return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+	struct mga_connector *mga_connector = to_mga_connector(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+	return ret;
+}
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	/* FIXME: Add bandwidth and g200se limitations */
+
+	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+	    mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+	    mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+						  *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj =
+		    drm_mode_object_find(connector->dev, enc_id,
+					 DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+						   *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+	struct mga_connector *mga_connector = to_mga_connector(connector);
+	mgag200_i2c_destroy(mga_connector->i2c);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+	.get_modes = mga_vga_get_modes,
+	.mode_valid = mga_vga_mode_valid,
+	.best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = mga_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct mga_connector *mga_connector;
+
+	mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+	if (!mga_connector)
+		return NULL;
+
+	connector = &mga_connector->base;
+
+	drm_connector_init(dev, connector,
+			   &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+	mga_connector->i2c = mgag200_i2c_create(dev);
+	if (!mga_connector->i2c)
+		DRM_ERROR("failed to add ddc bus\n");
+
+	return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	mdev->mode_info.mode_config_initialized = true;
+
+	mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+	mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+	mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+	mga_crtc_init(mdev->dev);
+
+	encoder = mga_encoder_init(mdev->dev);
+	if (!encoder) {
+		DRM_ERROR("mga_encoder_init failed\n");
+		return -1;
+	}
+
+	connector = mga_vga_init(mdev->dev);
+	if (!connector) {
+		DRM_ERROR("mga_vga_init failed\n");
+		return -1;
+	}
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ret = mgag200_fbdev_init(mdev);
+	if (ret) {
+		DRM_ERROR("mga_fbdev_init failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+
+}

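Each of the *_set_plls() helpers in mgag200_mode.c above follows the same pattern: brute-force the post divider P and the feedback/input dividers N and M, keep the combination whose output fref * N / (M * P) lands closest to the requested pixel clock, and program the winning values minus one into the PLL registers. The standalone sketch below is not part of this patch; the function name and the sample clock are illustrative only. It mirrors the G200SE variant of that search so the arithmetic can be checked outside the kernel.

#include <stdio.h>

/* mirrors mga_g200se_set_plls(): 25 MHz reference, VCO kept in 160-320 MHz.
 * All frequencies are in kHz, as in the driver. */
static unsigned int g200se_find_pll(long clock, unsigned int *m,
				    unsigned int *n, unsigned int *p)
{
	const unsigned int vcomax = 320000, vcomin = 160000, pllreffreq = 25000;
	unsigned int delta = 0xffffffff, tmpdelta, computed;
	unsigned int testp, testm, testn;

	*m = *n = *p = 0;
	for (testp = 8; testp > 0; testp /= 2) {
		/* the VCO runs at clock * P and must stay inside its window */
		if (clock * testp > vcomax || clock * testp < vcomin)
			continue;
		for (testn = 17; testn < 256; testn++) {
			for (testm = 1; testm < 32; testm++) {
				/* fout = fref * N / (M * P) */
				computed = (pllreffreq * testn) / (testm * testp);
				tmpdelta = computed > clock ? computed - clock
							    : clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					*m = testm - 1;	/* registers take divider - 1 */
					*n = testn - 1;
					*p = testp - 1;
				}
			}
		}
	}
	return delta;	/* caller compares this against the 0.5% tolerance */
}

int main(void)
{
	long clock = 65000;	/* 1024x768@60 pixel clock, in kHz */
	unsigned int m, n, p;
	unsigned int delta = g200se_find_pll(clock, &m, &n, &p);

	if (delta > clock * 5 / 1000)
		printf("no usable divider set within 0.5%%\n");
	else
		printf("M=%u N=%u P=%u (error %u kHz)\n", m, n, p, delta);
	return 0;
}

For 65000 kHz the search settles on an exact hit (for example N = 52, M = 5, P = 4 before the minus-one encoding), which matches the 0.5% tolerance check the driver applies before programming the PLLC registers.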
+ 661 - 0
drivers/gpu/drm/mgag200/mgag200_reg.h

@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ *		Dirk Hohndel
+ *			hohndel@XFree86.Org
+ *		David Dawes
+ *			dawes@XFree86.Org
+ * Contributors:
+ *		Guy DESBIEF, Aix-en-provence, France
+ *			g.desbief@aix.pacwan.net
+ *		MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define	MGAREG_DWGCTL		0x1c00
+#define	MGAREG_MACCESS		0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST		0x1c08
+#define	MGAREG_ZORG		0x1c0c
+
+#define	MGAREG_PAT0		0x1c10
+#define	MGAREG_PAT1		0x1c14
+#define	MGAREG_PLNWT		0x1c1c
+
+#define	MGAREG_BCOL		0x1c20
+#define	MGAREG_FCOL		0x1c24
+
+#define	MGAREG_SRC0		0x1c30
+#define	MGAREG_SRC1		0x1c34
+#define	MGAREG_SRC2		0x1c38
+#define	MGAREG_SRC3		0x1c3c
+
+#define	MGAREG_XYSTRT		0x1c40
+#define	MGAREG_XYEND		0x1c44
+
+#define	MGAREG_SHIFT		0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD		0x1c54
+#define	MGAREG_SGN		0x1c58
+#define	MGAREG_LEN		0x1c5c
+
+#define	MGAREG_AR0		0x1c60
+#define	MGAREG_AR1		0x1c64
+#define	MGAREG_AR2		0x1c68
+#define	MGAREG_AR3		0x1c6c
+#define	MGAREG_AR4		0x1c70
+#define	MGAREG_AR5		0x1c74
+#define	MGAREG_AR6		0x1c78
+
+#define	MGAREG_CXBNDRY		0x1c80
+#define	MGAREG_FXBNDRY		0x1c84
+#define	MGAREG_YDSTLEN		0x1c88
+#define	MGAREG_PITCH		0x1c8c
+
+#define	MGAREG_YDST		0x1c90
+#define	MGAREG_YDSTORG		0x1c94
+#define	MGAREG_YTOP		0x1c98
+#define	MGAREG_YBOT		0x1c9c
+
+#define	MGAREG_CXLEFT		0x1ca0
+#define	MGAREG_CXRIGHT		0x1ca4
+#define	MGAREG_FXLEFT		0x1ca8
+#define	MGAREG_FXRIGHT		0x1cac
+
+#define	MGAREG_XDST		0x1cb0
+
+#define	MGAREG_DR0		0x1cc0
+#define	MGAREG_DR1		0x1cc4
+#define	MGAREG_DR2		0x1cc8
+#define	MGAREG_DR3		0x1ccc
+
+#define	MGAREG_DR4		0x1cd0
+#define	MGAREG_DR5		0x1cd4
+#define	MGAREG_DR6		0x1cd8
+#define	MGAREG_DR7		0x1cdc
+
+#define	MGAREG_DR8		0x1ce0
+#define	MGAREG_DR9		0x1ce4
+#define	MGAREG_DR10		0x1ce8
+#define	MGAREG_DR11		0x1cec
+
+#define	MGAREG_DR12		0x1cf0
+#define	MGAREG_DR13		0x1cf4
+#define	MGAREG_DR14		0x1cf8
+#define	MGAREG_DR15		0x1cfc
+
+#define MGAREG_SRCORG		0x2cb4
+#define MGAREG_DSTORG		0x2cb8
+
+/* add, or OR, this to one of the previous "power registers" to start
+   the drawing engine */
+#define MGAREG_EXEC		0x0100
+
+#define	MGAREG_FIFOSTATUS	0x1e10
+#define	MGAREG_Status		0x1e14
+#define MGAREG_CACHEFLUSH       0x1fff
+#define	MGAREG_ICLEAR		0x1e18
+#define	MGAREG_IEN		0x1e1c
+
+#define	MGAREG_VCOUNT		0x1e20
+
+#define	MGAREG_Reset		0x1e40
+
+#define	MGAREG_OPMODE		0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR           0x1dc0
+#define MGAREG_WIADDR2          0x1dd8
+#define MGAREG_WGETMSB          0x1dc8
+#define MGAREG_WVRTXSZ          0x1dcc
+#define MGAREG_WACCEPTSEQ       0x1dd4
+#define MGAREG_WMISC            0x1e70
+
+#define MGAREG_MEMCTL           0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL	(0x00 << 2)
+#define MGAOPM_DMA_BLIT		(0x01 << 2)
+#define MGAOPM_DMA_VECTOR	(0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8               0x00
+#define MGAMAC_PW16              0x01
+#define MGAMAC_PW24              0x03 /* not a typo */
+#define MGAMAC_PW32              0x02 /* not a typo */
+#define MGAMAC_BYPASS332         0x10000000
+#define MGAMAC_NODITHER          0x40000000
+#define MGAMAC_DIT555            0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN	0x00
+#define MGADWG_AUTOLINE_OPEN	0x01
+#define MGADWG_LINE_CLOSE	0x02
+#define MGADWG_AUTOLINE_CLOSE	0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP		0x04
+#define MGADWG_TEXTURE_TRAP	0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT		0x08
+#define MGADWG_FBITBLT		0x0c
+#define MGADWG_ILOAD		0x09
+#define MGADWG_ILOAD_SCALE	0x0d
+#define MGADWG_ILOAD_FILTER	0x0f
+#define MGADWG_ILOAD_HIQH	0x07
+#define MGADWG_ILOAD_HIQHV	0x0e
+#define MGADWG_IDUMP		0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL		( 0x00 << 4 )
+#define MGADWG_RSTR		( 0x01 << 4 )
+#define MGADWG_ZI		( 0x03 << 4 )
+#define MGADWG_BLK 		( 0x04 << 4 )
+#define MGADWG_I		( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR		( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP		( 0x00 << 8 )
+#define MGADWG_ZE		( 0x02 << 8 )
+#define MGADWG_ZNE		( 0x03 << 8 )
+#define MGADWG_ZLT		( 0x04 << 8 )
+#define MGADWG_ZLTE		( 0x05 << 8 )
+#define MGADWG_GT		( 0x06 << 8 )
+#define MGADWG_GTE		( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID		( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO		( 0x01 << 12 )
+
+#define MGADWG_SGNZERO		( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO	( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF		( 0x00 << 25 )
+#define MGADWG_BMONOWF		( 0x04 << 25 )
+#define MGADWG_BPLAN		( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+   a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL   		( 0x02 << 25 )
+#define MGADWG_BUYUV		( 0x0e << 25 )
+#define MGADWG_BU32BGR		( 0x03 << 25 )
+#define MGADWG_BU32RGB		( 0x07 << 25 )
+#define MGADWG_BU24BGR		( 0x0b << 25 )
+#define MGADWG_BU24RGB		( 0x0f << 25 )
+
+#define MGADWG_PATTERN		( 0x01 << 29 )
+#define MGADWG_TRANSC		( 0x01 << 30 )
+#define MGAREG_MISC_WRITE	0x3c2
+#define MGAREG_MISC_READ	0x3cc
+#define MGAREG_MEM_MISC_WRITE       0x1fc2
+#define MGAREG_MEM_MISC_READ        0x1fcc
+
+#define MGAREG_MISC_IOADSEL	(0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN	(0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25	(0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28	(0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX	(0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK	(0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS	(0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL	(0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX	0x1fc4
+#define MGAREG_SEQ_DATA		0x1fc5
+#define MGAREG_CRTC_INDEX	0x1fd4
+#define MGAREG_CRTC_DATA	0x1fd5
+#define MGAREG_CRTCEXT_INDEX	0x1fde
+#define MGAREG_CRTCEXT_DATA	0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI   		( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL   		( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT   		( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK   		( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS   		( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1   		( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1   		( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN   		( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION   		( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX		0x44
+#define PCI_MGA_DATA		0x48
+#define PCI_MGA_OPTION		0x40
+#define PCI_MGA_OPTION2		0x50
+#define PCI_MGA_OPTION3		0x54
+
+#define RAMDAC_OFFSET		0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX		0x00
+#define TVP3026_WADR_PAL	0x00
+#define TVP3026_COL_PAL		0x01
+#define TVP3026_PIX_RD_MSK	0x02
+#define TVP3026_RADR_PAL	0x03
+#define TVP3026_CUR_COL_ADDR	0x04
+#define TVP3026_CUR_COL_DATA	0x05
+#define TVP3026_DATA		0x0a
+#define TVP3026_CUR_RAM		0x0b
+#define TVP3026_CUR_XLOW	0x0c
+#define TVP3026_CUR_XHI		0x0d
+#define TVP3026_CUR_YLOW	0x0e
+#define TVP3026_CUR_YHI		0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV	0x01
+#define TVP3026_CURSOR_CTL	0x06
+#define TVP3026_LATCH_CTL	0x0f
+#define TVP3026_TRUE_COLOR_CTL	0x18
+#define TVP3026_MUX_CTL		0x19
+#define TVP3026_CLK_SEL		0x1a
+#define TVP3026_PAL_PAGE	0x1c
+#define TVP3026_GEN_CTL		0x1d
+#define TVP3026_MISC_CTL	0x1e
+#define TVP3026_GEN_IO_CTL	0x2a
+#define TVP3026_GEN_IO_DATA	0x2b
+#define TVP3026_PLL_ADDR	0x2c
+#define TVP3026_PIX_CLK_DATA	0x2d
+#define TVP3026_MEM_CLK_DATA	0x2e
+#define TVP3026_LOAD_CLK_DATA	0x2f
+#define TVP3026_KEY_RED_LOW	0x32
+#define TVP3026_KEY_RED_HI	0x33
+#define TVP3026_KEY_GREEN_LOW	0x34
+#define TVP3026_KEY_GREEN_HI	0x35
+#define TVP3026_KEY_BLUE_LOW	0x36
+#define TVP3026_KEY_BLUE_HI	0x37
+#define TVP3026_KEY_CTL		0x38
+#define TVP3026_MCLK_CTL	0x39
+#define TVP3026_SENSE_TEST	0x3a
+#define TVP3026_TEST_DATA	0x3b
+#define TVP3026_CRC_LSB		0x3c
+#define TVP3026_CRC_MSB		0x3d
+#define TVP3026_CRC_CTL		0x3e
+#define TVP3026_ID		0x3f
+#define TVP3026_RESET		0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX		0x00
+#define MGA1064_WADR_PAL	0x00
+#define MGA1064_SPAREREG        0x00
+#define MGA1064_COL_PAL		0x01
+#define MGA1064_PIX_RD_MSK	0x02
+#define MGA1064_RADR_PAL	0x03
+#define MGA1064_DATA		0x0a
+
+#define MGA1064_CUR_XLOW	0x0c
+#define MGA1064_CUR_XHI		0x0d
+#define MGA1064_CUR_YLOW	0x0e
+#define MGA1064_CUR_YHI		0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL    0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW	0x04
+#define MGA1064_CURSOR_BASE_ADR_HI	0x05
+#define MGA1064_CURSOR_CTL	0x06
+#define MGA1064_CURSOR_COL0_RED	0x08
+#define MGA1064_CURSOR_COL0_GREEN	0x09
+#define MGA1064_CURSOR_COL0_BLUE	0x0a
+
+#define MGA1064_CURSOR_COL1_RED	0x0c
+#define MGA1064_CURSOR_COL1_GREEN	0x0d
+#define MGA1064_CURSOR_COL1_BLUE	0x0e
+
+#define MGA1064_CURSOR_COL2_RED	0x010
+#define MGA1064_CURSOR_COL2_GREEN	0x011
+#define MGA1064_CURSOR_COL2_BLUE	0x012
+
+#define MGA1064_VREF_CTL	0x018
+
+#define MGA1064_MUL_CTL		0x19
+#define MGA1064_MUL_CTL_8bits		0x0
+#define MGA1064_MUL_CTL_15bits		0x01
+#define MGA1064_MUL_CTL_16bits		0x02
+#define MGA1064_MUL_CTL_24bits		0x03
+#define MGA1064_MUL_CTL_32bits		0x04
+#define MGA1064_MUL_CTL_2G8V16bits		0x05
+#define MGA1064_MUL_CTL_G16V16bits		0x06
+#define MGA1064_MUL_CTL_32_24bits		0x07
+
+#define MGA1064_PIX_CLK_CTL		0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS		( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN	( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI		( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL		( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT		( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK		( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL		0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS      (0x01 << 5)
+#define MGA1064_MISC_CTL	0x1e
+#define MGA1064_MISC_CTL_DAC_EN                ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA   		( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON   		( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC   		( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8   		( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS   		( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2	0x29
+#define MGA1064_GEN_IO_CTL	0x2a
+#define MGA1064_GEN_IO_DATA	0x2b
+#define MGA1064_SYS_PLL_M	0x2c
+#define MGA1064_SYS_PLL_N	0x2d
+#define MGA1064_SYS_PLL_P	0x2e
+#define MGA1064_SYS_PLL_STAT	0x2f
+
+#define MGA1064_REMHEADCTL     0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2     0x31
+
+#define MGA1064_ZOOM_CTL	0x38
+#define MGA1064_SENSE_TST	0x3a
+
+#define MGA1064_CRC_LSB		0x3c
+#define MGA1064_CRC_MSB		0x3d
+#define MGA1064_CRC_CTL		0x3e
+#define MGA1064_COL_KEY_MSK_LSB		0x40
+#define MGA1064_COL_KEY_MSK_MSB		0x41
+#define MGA1064_COL_KEY_LSB		0x42
+#define MGA1064_COL_KEY_MSB		0x43
+#define MGA1064_PIX_PLLA_M	0x44
+#define MGA1064_PIX_PLLA_N	0x45
+#define MGA1064_PIX_PLLA_P	0x46
+#define MGA1064_PIX_PLLB_M	0x48
+#define MGA1064_PIX_PLLB_N	0x49
+#define MGA1064_PIX_PLLB_P	0x4a
+#define MGA1064_PIX_PLLC_M	0x4c
+#define MGA1064_PIX_PLLC_N	0x4d
+#define MGA1064_PIX_PLLC_P	0x4e
+
+#define MGA1064_PIX_PLL_STAT	0x4f
+
+/* Added for G450 dual head */
+
+#define MGA1064_VID_PLL_STAT    0x8c
+#define MGA1064_VID_PLL_P       0x8D
+#define MGA1064_VID_PLL_M       0x8E
+#define MGA1064_VID_PLL_N       0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M	0xb7
+#define MGA1064_WB_PIX_PLLC_N	0xb6
+#define MGA1064_WB_PIX_PLLC_P	0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M	0xb6
+#define MGA1064_EV_PIX_PLLC_N	0xb7
+#define MGA1064_EV_PIX_PLLC_P	0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M   0xb6
+#define MGA1064_EH_PIX_PLLC_N   0xb7
+#define MGA1064_EH_PIX_PLLC_P   0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M	0xb7
+#define MGA1064_ER_PIX_PLLC_N	0xb6
+#define MGA1064_ER_PIX_PLLC_P	0xb8
+
+#define MGA1064_DISP_CTL        0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK       0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS        0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN         0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK       (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS        0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1      (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2      (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE        (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK        (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS         0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1       (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB    (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656    (0x03 << 5)
+
+#define MGA1064_SYNC_CTL        0x8b
+
+#define MGA1064_PWR_CTL         0xa0
+#define MGA1064_PWR_CTL_DAC2_EN                (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN             (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN               (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN               (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN               (0x01 << 4)
+
+#define MGA1064_PAN_CTL         0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL            0x10
+#define MGAREG2_C2HPARAM         0x14
+#define MGAREG2_C2HSYNC          0x18
+#define MGAREG2_C2VPARAM         0x1c
+#define MGAREG2_C2VSYNC          0x20
+#define MGAREG2_C2STARTADD0      0x28
+
+#define MGAREG2_C2OFFSET         0x40
+#define MGAREG2_C2DATACTL        0x4c
+
+#define MGAREG_C2CTL            0x3c10
+#define MGAREG_C2CTL_C2_EN                     0x01
+
+#define MGAREG_C2_HIPRILVL_M                   (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M                   (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK            (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK           (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK          0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK          (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL        (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL        (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK           (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL         ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL       ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK            (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE         (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK           (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1          0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2          (0x01 << 20)
+
+#define MGAREG_C2HPARAM         0x3c14
+#define MGAREG_C2HSYNC          0x3c18
+#define MGAREG_C2VPARAM         0x3c1c
+#define MGAREG_C2VSYNC          0x3c20
+#define MGAREG_C2STARTADD0      0x3c28
+
+#define MGAREG_C2OFFSET         0x3c40
+#define MGAREG_C2DATACTL        0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG	0x3d60
+#define MGAREG_BESA1CORG	0x3d10
+#define MGAREG_BESA1ORG		0x3d00
+#define MGAREG_BESCTL		0x3d20
+#define MGAREG_BESGLOBCTL	0x3dc0
+#define MGAREG_BESHCOORD	0x3d28
+#define MGAREG_BESHISCAL	0x3d30
+#define MGAREG_BESHSRCEND	0x3d3c
+#define MGAREG_BESHSRCLST	0x3d50
+#define MGAREG_BESHSRCST	0x3d38
+#define MGAREG_BESLUMACTL	0x3d40
+#define MGAREG_BESPITCH		0x3d24
+#define MGAREG_BESV1SRCLST	0x3d54
+#define MGAREG_BESV1WGHT	0x3d48
+#define MGAREG_BESVCOORD	0x3d2c
+#define MGAREG_BESVISCAL	0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0		0x2c00
+#define MGAREG_TMR1		0x2c04
+#define MGAREG_TMR2		0x2c08
+#define MGAREG_TMR3		0x2c0c
+#define MGAREG_TMR4		0x2c10
+#define MGAREG_TMR5		0x2c14
+#define MGAREG_TMR6		0x2c18
+#define MGAREG_TMR7		0x2c1c
+#define MGAREG_TMR8		0x2c20
+#define MGAREG_TEXORG		0x2c24
+#define MGAREG_TEXWIDTH		0x2c28
+#define MGAREG_TEXHEIGHT	0x2c2c
+#define MGAREG_TEXCTL		0x2c30
+#    define MGA_TW4                             (0x00000000)
+#    define MGA_TW8                             (0x00000001)
+#    define MGA_TW15                            (0x00000002)
+#    define MGA_TW16                            (0x00000003)
+#    define MGA_TW12                            (0x00000004)
+#    define MGA_TW32                            (0x00000006)
+#    define MGA_TW8A                            (0x00000007)
+#    define MGA_TW8AL                           (0x00000008)
+#    define MGA_TW422                           (0x0000000A)
+#    define MGA_TW422UYVY                       (0x0000000B)
+#    define MGA_PITCHLIN                        (0x00000100)
+#    define MGA_NOPERSPECTIVE                   (0x00200000)
+#    define MGA_TAKEY                           (0x02000000)
+#    define MGA_TAMASK                          (0x04000000)
+#    define MGA_CLAMPUV                         (0x18000000)
+#    define MGA_TEXMODULATE                     (0x20000000)
+#define MGAREG_TEXCTL2		0x2c3c
+#    define MGA_G400_TC2_MAGIC                  (0x00008000)
+#    define MGA_TC2_DECALBLEND                  (0x00000001)
+#    define MGA_TC2_IDECAL                      (0x00000002)
+#    define MGA_TC2_DECALDIS                    (0x00000004)
+#    define MGA_TC2_CKSTRANSDIS                 (0x00000010)
+#    define MGA_TC2_BORDEREN                    (0x00000020)
+#    define MGA_TC2_SPECEN                      (0x00000040)
+#    define MGA_TC2_DUALTEX                     (0x00000080)
+#    define MGA_TC2_TABLEFOG                    (0x00000100)
+#    define MGA_TC2_BUMPMAP                     (0x00000200)
+#    define MGA_TC2_SELECT_TMU1                 (0x80000000)
+#define MGAREG_TEXTRANS		0x2c34
+#define MGAREG_TEXTRANSHIGH	0x2c38
+#define MGAREG_TEXFILTER	0x2c58
+#    define MGA_MIN_NRST                        (0x00000000)
+#    define MGA_MIN_BILIN                       (0x00000002)
+#    define MGA_MIN_ANISO                       (0x0000000D)
+#    define MGA_MAG_NRST                        (0x00000000)
+#    define MGA_MAG_BILIN                       (0x00000020)
+#    define MGA_FILTERALPHA                     (0x00100000)
+#define MGAREG_ALPHASTART	0x2c70
+#define MGAREG_ALPHAXINC	0x2c74
+#define MGAREG_ALPHAYINC	0x2c78
+#define MGAREG_ALPHACTRL	0x2c7c
+#    define MGA_SRC_ZERO                        (0x00000000)
+#    define MGA_SRC_ONE                         (0x00000001)
+#    define MGA_SRC_DST_COLOR                   (0x00000002)
+#    define MGA_SRC_ONE_MINUS_DST_COLOR         (0x00000003)
+#    define MGA_SRC_ALPHA                       (0x00000004)
+#    define MGA_SRC_ONE_MINUS_SRC_ALPHA         (0x00000005)
+#    define MGA_SRC_DST_ALPHA                   (0x00000006)
+#    define MGA_SRC_ONE_MINUS_DST_ALPHA         (0x00000007)
+#    define MGA_SRC_SRC_ALPHA_SATURATE          (0x00000008)
+#    define MGA_SRC_BLEND_MASK                  (0x0000000f)
+#    define MGA_DST_ZERO                        (0x00000000)
+#    define MGA_DST_ONE                         (0x00000010)
+#    define MGA_DST_SRC_COLOR                   (0x00000020)
+#    define MGA_DST_ONE_MINUS_SRC_COLOR         (0x00000030)
+#    define MGA_DST_SRC_ALPHA                   (0x00000040)
+#    define MGA_DST_ONE_MINUS_SRC_ALPHA         (0x00000050)
+#    define MGA_DST_DST_ALPHA                   (0x00000060)
+#    define MGA_DST_ONE_MINUS_DST_ALPHA         (0x00000070)
+#    define MGA_DST_BLEND_MASK                  (0x00000070)
+#    define MGA_ALPHACHANNEL                    (0x00000100)
+#    define MGA_VIDEOALPHA                      (0x00000200)
+#    define MGA_DIFFUSEDALPHA                   (0x01000000)
+#    define MGA_MODULATEDALPHA                  (0x02000000)
+#define MGAREG_TDUALSTAGE0                      (0x2CF8)
+#define MGAREG_TDUALSTAGE1                      (0x2CFC)
+#    define MGA_TDS_COLOR_ARG2_DIFFUSE          (0x00000000)
+#    define MGA_TDS_COLOR_ARG2_SPECULAR         (0x00000001)
+#    define MGA_TDS_COLOR_ARG2_FCOL             (0x00000002)
+#    define MGA_TDS_COLOR_ARG2_PREVSTAGE        (0x00000003)
+#    define MGA_TDS_COLOR_ALPHA_DIFFUSE         (0x00000000)
+#    define MGA_TDS_COLOR_ALPHA_FCOL            (0x00000004)
+#    define MGA_TDS_COLOR_ALPHA_CURRTEX         (0x00000008)
+#    define MGA_TDS_COLOR_ALPHA_PREVTEX         (0x0000000c)
+#    define MGA_TDS_COLOR_ALPHA_PREVSTAGE       (0x00000010)
+#    define MGA_TDS_COLOR_ARG1_REPLICATEALPHA   (0x00000020)
+#    define MGA_TDS_COLOR_ARG1_INV              (0x00000040)
+#    define MGA_TDS_COLOR_ARG2_REPLICATEALPHA   (0x00000080)
+#    define MGA_TDS_COLOR_ARG2_INV              (0x00000100)
+#    define MGA_TDS_COLOR_ALPHA1INV             (0x00000200)
+#    define MGA_TDS_COLOR_ALPHA2INV             (0x00000400)
+#    define MGA_TDS_COLOR_ARG1MUL_ALPHA1        (0x00000800)
+#    define MGA_TDS_COLOR_ARG2MUL_ALPHA2        (0x00001000)
+#    define MGA_TDS_COLOR_ARG1ADD_MULOUT        (0x00002000)
+#    define MGA_TDS_COLOR_ARG2ADD_MULOUT        (0x00004000)
+#    define MGA_TDS_COLOR_MODBRIGHT_2X          (0x00008000)
+#    define MGA_TDS_COLOR_MODBRIGHT_4X          (0x00010000)
+#    define MGA_TDS_COLOR_ADD_SUB               (0x00000000)
+#    define MGA_TDS_COLOR_ADD_ADD               (0x00020000)
+#    define MGA_TDS_COLOR_ADD2X                 (0x00040000)
+#    define MGA_TDS_COLOR_ADDBIAS               (0x00080000)
+#    define MGA_TDS_COLOR_BLEND                 (0x00100000)
+#    define MGA_TDS_COLOR_SEL_ARG1              (0x00000000)
+#    define MGA_TDS_COLOR_SEL_ARG2              (0x00200000)
+#    define MGA_TDS_COLOR_SEL_ADD               (0x00400000)
+#    define MGA_TDS_COLOR_SEL_MUL               (0x00600000)
+#    define MGA_TDS_ALPHA_ARG1_INV              (0x00800000)
+#    define MGA_TDS_ALPHA_ARG2_DIFFUSE          (0x00000000)
+#    define MGA_TDS_ALPHA_ARG2_FCOL             (0x01000000)
+#    define MGA_TDS_ALPHA_ARG2_PREVTEX          (0x02000000)
+#    define MGA_TDS_ALPHA_ARG2_PREVSTAGE        (0x03000000)
+#    define MGA_TDS_ALPHA_ARG2_INV              (0x04000000)
+#    define MGA_TDS_ALPHA_ADD                   (0x08000000)
+#    define MGA_TDS_ALPHA_ADDBIAS               (0x10000000)
+#    define MGA_TDS_ALPHA_ADD2X                 (0x20000000)
+#    define MGA_TDS_ALPHA_SEL_ARG1              (0x00000000)
+#    define MGA_TDS_ALPHA_SEL_ARG2              (0x40000000)
+#    define MGA_TDS_ALPHA_SEL_ADD               (0x80000000)
+#    define MGA_TDS_ALPHA_SEL_MUL               (0xc0000000)
+
+#define MGAREG_DWGSYNC		0x2c4c
+
+#define MGAREG_AGP_PLL		0x1e4c
+#define MGA_AGP2XPLL_ENABLE		0x1
+#define MGA_AGP2XPLL_DISABLE		0x0
+
+#endif
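
A note on the blend encoding defined above: the MGA_SRC_* factors occupy bits 0-3 (MGA_SRC_BLEND_MASK) and the MGA_DST_* factors bits 4-6 (MGA_DST_BLEND_MASK), so a complete MGAREG_ALPHACTRL value is simply the OR of one factor from each group plus any modifier bits. A minimal sketch, assuming only the defines from this header are in scope; the blend-formula comment reflects the usual meaning of these GL-style factor names, not a claim about this particular hardware:

	/* conventional "source over" blending:
	 *   out = src * src_alpha + dst * (1 - src_alpha) */
	u32 alphactrl = MGA_SRC_SRC_ALPHA |             /* source factor, bits 0-3 */
			MGA_DST_ONE_MINUS_SRC_ALPHA |   /* destination factor, bits 4-6 */
			MGA_ALPHACHANNEL;               /* modifier bit 0x100 */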

+ 452 - 0
drivers/gpu/drm/mgag200/mgag200_ttm.c

@@ -0,0 +1,452 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *ast)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &ast->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &mgag200_ttm_mem_global_init;
+	global_ref->release = &mgag200_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	ast->ttm.bo_global_ref.mem_glob =
+		ast->ttm.mem_global_ref.object;
+	global_ref = &ast->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&ast->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *ast)
+{
+	if (ast->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&ast->ttm.mem_global_ref);
+	ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct mgag200_bo *bo;
+
+	bo = container_of(tbo, struct mgag200_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &mgag200_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+	if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+		return;
+
+	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+	*pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct mga_device *mdev = mgag200_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+	.destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &mgag200_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+	.ttm_tt_create = mgag200_ttm_tt_create,
+	.ttm_tt_populate = mgag200_ttm_tt_populate,
+	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+	.init_mem_type = mgag200_bo_init_mem_type,
+	.evict_flags = mgag200_bo_evict_flags,
+	.move = mgag200_bo_move,
+	.verify_access = mgag200_bo_verify_access,
+	.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+	.io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+	int ret;
+	struct drm_device *dev = mdev->dev;
+	struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+	ret = mgag200_ttm_global_init(mdev);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&mdev->ttm.bdev,
+				 mdev->ttm.bo_global_ref.ref.object,
+				 &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+	struct drm_device *dev = mdev->dev;
+	ttm_bo_device_release(&mdev->ttm.bdev);
+
+	mgag200_ttm_global_release(mdev);
+
+	if (mdev->fb_mtrr >= 0) {
+		drm_mtrr_del(mdev->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		mdev->fb_mtrr = -1;
+	}
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct mgag200_bo **pmgabo)
+{
+	struct mga_device *mdev = dev->dev_private;
+	struct mgag200_bo *mgabo;
+	size_t acc_size;
+	int ret;
+
+	mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+	if (!mgabo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &mgabo->gem, size);
+	if (ret) {
+		kfree(mgabo);
+		return ret;
+	}
+
+	mgabo->gem.driver_private = NULL;
+	mgabo->bo.bdev = &mdev->ttm.bdev;
+
+	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+				       sizeof(struct mgag200_bo));
+
+	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+			  ttm_bo_type_device, &mgabo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  mgag200_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pmgabo = mgabo;
+	return 0;
+}
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = mgag200_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	mgag200_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = mgag200_bo_gpu_offset(bo);
+	return 0;
+}
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to VRAM failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct mga_device *mdev;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	mdev = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}

+ 7 - 3
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -662,6 +662,12 @@ error:
 	return ret;
 }
 
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+	.set_gpu_state = nouveau_switcheroo_set_state,
+	.reprobe = nouveau_switcheroo_reprobe,
+	.can_switch = nouveau_switcheroo_can_switch,
+};
+
 int
 nouveau_card_init(struct drm_device *dev)
 {
@@ -670,9 +676,7 @@ nouveau_card_init(struct drm_device *dev)
 	int ret, e = 0;
 
 	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
-	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
-				       nouveau_switcheroo_reprobe,
-				       nouveau_switcheroo_can_switch);
+	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
 
 	/* Initialise internal driver API hooks */
 	ret = nouveau_init_engine_ptrs(dev);
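
The hunk above tracks the vga_switcheroo interface change in this merge: the three separate callback arguments become a single const struct vga_switcheroo_client_ops. A sketch of the same conversion for a hypothetical client driver "foo", assuming the two-argument vga_switcheroo_register_client() shown above:

	static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
		.set_gpu_state	= foo_switcheroo_set_state,	/* hypothetical callbacks */
		.reprobe	= foo_switcheroo_reprobe,
		.can_switch	= foo_switcheroo_can_switch,
	};

	/* old: vga_switcheroo_register_client(pdev, set_state, reprobe, can_switch); */
	vga_switcheroo_register_client(pdev, &foo_switcheroo_ops);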

+ 3 - 2
drivers/gpu/drm/radeon/Makefile

@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
-	radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+	si_blit_shaders.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o

+ 14 - 2
drivers/gpu/drm/radeon/atombios_encoders.c

@@ -1926,7 +1926,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
 	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
 		r600_hdmi_enable(encoder);
-		r600_hdmi_setmode(encoder, adjusted_mode);
+		if (ASIC_IS_DCE4(rdev))
+			evergreen_hdmi_setmode(encoder, adjusted_mode);
+		else
+			r600_hdmi_setmode(encoder, adjusted_mode);
 	}
 }
 
@@ -2081,6 +2084,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
 
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
+	struct radeon_device *rdev = encoder->dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -2089,8 +2093,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
 	     ENCODER_OBJECT_ID_NONE)) {
 		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-		if (dig)
+		if (dig) {
 			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+				if (rdev->family >= CHIP_R600)
+					dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+				else
+					/* RS600/690/740 have only 1 afmt block */
+					dig->afmt = rdev->mode_info.afmt[0];
+			}
+		}
 	}
 
 	radeon_atom_output_lock(encoder, true);

+ 0 - 5
drivers/gpu/drm/radeon/evergreen.c

@@ -3435,10 +3435,6 @@ int evergreen_init(struct radeon_device *rdev)
 {
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -3550,7 +3546,6 @@ void evergreen_fini(struct radeon_device *rdev)
 	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);

+ 0 - 1
drivers/gpu/drm/radeon/evergreen_blit_kms.c

@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	if (rdev->r600_blit.shader_obj)
 		goto done;
 
-	mutex_init(&rdev->r600_blit.mutex);
 	rdev->r600_blit.state_offset = 0;
 
 	if (rdev->family < CHIP_CAYMAN)

+ 5 - 5
drivers/gpu/drm/radeon/evergreen_cs.c

@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg, wait_reg_mem_info;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the WAIT_REG_MEM */
 	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		if (!(evergreen_reg_safe_bm[i] & m))
 			return 0;
 	}
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
 	 * which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 	u32 idx_value;
 
 	track = (struct evergreen_cs_track *)p->track;
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 #if 0
-	for (r = 0; r < p->ib->length_dw; r++) {
-		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
 		mdelay(1);
 	}
 #endif

+ 216 - 0
drivers/gpu/drm/radeon/evergreen_hdmi.c

@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ *          Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the checksum for a given info frame
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+					 uint8_t versionNumber,
+					 uint8_t length,
+					 uint8_t *frame)
+{
+	int i;
+	frame[0] = packetType + versionNumber + length;
+	for (i = 1; i <= length; i++)
+		frame[0] += frame[i];
+	frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+	struct drm_encoder *encoder,
+	uint8_t color_format,
+	int active_information_present,
+	uint8_t active_format_aspect_ratio,
+	uint8_t scan_information,
+	uint8_t colorimetry,
+	uint8_t ex_colorimetry,
+	uint8_t quantization,
+	int ITC,
+	uint8_t picture_aspect_ratio,
+	uint8_t video_format_identification,
+	uint8_t pixel_repetition,
+	uint8_t non_uniform_picture_scaling,
+	uint8_t bar_info_data_valid,
+	uint16_t top_bar,
+	uint16_t bottom_bar,
+	uint16_t left_bar,
+	uint16_t right_bar
+)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	uint8_t frame[14];
+
+	frame[0x0] = 0;
+	frame[0x1] =
+		(scan_information & 0x3) |
+		((bar_info_data_valid & 0x3) << 2) |
+		((active_information_present & 0x1) << 4) |
+		((color_format & 0x3) << 5);
+	frame[0x2] =
+		(active_format_aspect_ratio & 0xF) |
+		((picture_aspect_ratio & 0x3) << 4) |
+		((colorimetry & 0x3) << 6);
+	frame[0x3] =
+		(non_uniform_picture_scaling & 0x3) |
+		((quantization & 0x3) << 2) |
+		((ex_colorimetry & 0x7) << 4) |
+		((ITC & 0x1) << 7);
+	frame[0x4] = (video_format_identification & 0x7F);
+	frame[0x5] = (pixel_repetition & 0xF);
+	frame[0x6] = (top_bar & 0xFF);
+	frame[0x7] = (top_bar >> 8);
+	frame[0x8] = (bottom_bar & 0xFF);
+	frame[0x9] = (bottom_bar >> 8);
+	frame[0xA] = (left_bar & 0xFF);
+	frame[0xB] = (left_bar >> 8);
+	frame[0xC] = (right_bar & 0xFF);
+	frame[0xD] = (right_bar >> 8);
+
+	evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright; Intel
+	 * uses the same ones. The checksum function also seems to be OK, as it
+	 * works fine for the audio infoframe. However, the calculated value is
+	 * always lower by 2 than what fglrx produces, which breaks display
+	 * entirely on TVs that strictly check the checksum. Hack it manually
+	 * here to work around the issue. */
+	frame[0x0] += 2;
+
+	WREG32(AFMT_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(AFMT_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(AFMT_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(AFMT_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset;
+
+	if (ASIC_IS_DCE5(rdev))
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	r600_audio_set_clock(encoder, mode->clock);
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND); /* send null packets when required */
+
+	WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+	       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+	       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+	       HDMI_ACR_SOURCE); /* select SW CTS value */
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND | /* send null packets when required */
+	       HDMI_GC_SEND | /* send general control packets */
+	       HDMI_GC_CONT); /* send general control packets every frame */
+
+	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+	       HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+	       AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+	       HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+	evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				      0, 0, 0, 0, 0, 0);
+
+	evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+	WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
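
The AVI infoframe checksum above is chosen so that the header bytes, the payload bytes and the checksum sum to zero modulo 256. A minimal arithmetic sketch (not driver code) for the all-zero payload that evergreen_hdmi_setmode() builds, including the "+2" fglrx workaround:

	uint8_t sum  = 0x82 + 0x02 + 0x0D;	/* type + version + length; payload bytes are all zero */
	uint8_t csum = 0x100 - sum;		/* 0x100 - 0x91 = 0x6F */
	uint8_t wire = csum + 2;		/* 0x71, the value that lands in bits 7:0 of AFMT_AVI_INFO0 */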

+ 0 - 5
drivers/gpu/drm/radeon/ni.c

@@ -1629,10 +1629,6 @@ int cayman_init(struct radeon_device *rdev)
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -1744,7 +1740,6 @@ void cayman_fini(struct radeon_device *rdev)
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);

+ 21 - 19
drivers/gpu/drm/radeon/r100.c

@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 		}
 
 		tmp |= tile_flags;
-		p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
 	} else
-		p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
 	return 0;
 }
 
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	c = radeon_get_ib_value(p, idx++) & 0x1F;
 	if (c > 16) {
@@ -1275,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
 	unsigned i;
 	unsigned idx;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1354,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 	uint32_t header, h_idx, reg;
 	volatile uint32_t *ib;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 
 	/* parse the wait until */
 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1533,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 
 	idx_value = radeon_get_ib_value(p, idx);
@@ -1889,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 	volatile uint32_t *ib;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
@@ -2008,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
 	int r;
 
 	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (!track)
+		return -ENOMEM;
 	r100_cs_track_clear(p->rdev, track);
 	p->track = track;
 	do {
@@ -3684,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -3700,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	if (r) {
 		return r;
 	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		return r;
 	}

+ 1 - 1
drivers/gpu/drm/radeon/r200.c

@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	u32 tile_flags = 0;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 	switch (reg) {

+ 2 - 2
drivers/gpu/drm/radeon/r300.c

@@ -604,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 	int r;
 	u32 idx_value;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 	idx_value = radeon_get_ib_value(p, idx);
 
@@ -1146,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	ib = p->ib->ptr;
+	ib = p->ib.ptr;
 	idx = pkt->idx + 1;
 	track = (struct r100_cs_track *)p->track;
 	switch(pkt->opcode) {

+ 20 - 22
drivers/gpu/drm/radeon/r600.c

@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+			 * aux dp channel on iMacs; this helps (but does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 */
+			continue;
+		}
 		if (ASIC_IS_DCE3(rdev)) {
 			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
 			if (ASIC_IS_DCE32(rdev))
@@ -2363,20 +2371,15 @@ int r600_copy_blit(struct radeon_device *rdev,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
+	struct radeon_sa_bo *vb = NULL;
 	int r;
 
-	mutex_lock(&rdev->r600_blit.mutex);
-	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
 	if (r) {
-		if (rdev->r600_blit.vb_ib)
-			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
-	r600_blit_done_copy(rdev, fence);
-	mutex_unlock(&rdev->r600_blit.mutex);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+	r600_blit_done_copy(rdev, fence, vb);
 	return 0;
 }
 
@@ -2557,10 +2560,6 @@ int r600_init(struct radeon_device *rdev)
 	if (r600_debugfs_mc_info_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for mc !\n");
 	}
-	/* This don't do much */
-	r = radeon_gem_init(rdev);
-	if (r)
-		return r;
 	/* Read BIOS */
 	if (!radeon_get_bios(rdev)) {
 		if (ASIC_IS_AVIVO(rdev))
@@ -2658,7 +2657,6 @@ void r600_fini(struct radeon_device *rdev)
 	r600_vram_scratch_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
@@ -2687,7 +2685,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	struct radeon_ib *ib;
+	struct radeon_ib ib;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2705,18 +2703,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
 		return r;
 	}
-	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
-	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
-	ib->ptr[2] = 0xDEADBEEF;
-	ib->length_dw = 3;
-	r = radeon_ib_schedule(rdev, ib);
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		return r;
 	}
-	r = radeon_fence_wait(ib->fence, false);
+	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
 		return r;
@@ -2728,7 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
 	} else {
 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);

Some files were not shown because too many files changed in this diff